diff --git a/.gitignore b/.gitignore index 6208c37c43..6219354ab8 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,7 @@ yarn-error.log* .idea +*.pyc *.info.mdx .tool-versions diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000..00ad71fba1 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "typescript.tsdk": "node_modules\\typescript\\lib" +} \ No newline at end of file diff --git a/README.md b/README.md index f9825bbc9c..b860b3f648 100644 --- a/README.md +++ b/README.md @@ -166,7 +166,7 @@ print("hello world") #### Code Example -![Create account code example](./readme-imgs/code-example.png) +![Create account code example](./static/img/github/code-example.png) `` is a code snippet component. You can use this component when you want to include snippets for more than one language. See an example diff --git a/crowdin.yaml b/crowdin.yaml index d66aae9834..2710926be9 100644 --- a/crowdin.yaml +++ b/crowdin.yaml @@ -16,7 +16,7 @@ files: translation: /i18n/%two_letters_code%/docusaurus-plugin-content-docs/current/**/%original_file_name% ignore : ['**/*.api.mdx', '**/*.info.mdx', '**/*.tag.mdx', '**/*.schema.mdx', '**/*.json'] # Meeting Notes Blog Markdown files - - source: /meeting-notes/**/* + - source: /meetings/**/* translation: /i18n/%two_letters_code%/docusaurus-plugin-content-blog/**/%original_file_name% # Pages Markdown files - source: /src/pages/**/* diff --git a/docs/build/apps/wallet/sep24.mdx b/docs/build/apps/wallet/sep24.mdx index 88e4eab8bf..70f4f4f2e1 100644 --- a/docs/build/apps/wallet/sep24.mdx +++ b/docs/build/apps/wallet/sep24.mdx @@ -712,7 +712,7 @@ onMessage: (transaction) => { // Signs it with the account key pair transferTransaction.sign(keypair); - // Finally submits it to the stellar network. This stellar.submitTransaction() + // Finally submits it to the Stellar network. This stellar.submitTransaction() // function handles '504' status codes (timeout) by retrying it until // submission succeeds or we get a different error. try { @@ -743,7 +743,7 @@ onMessage: (transaction) => { type: 'https://stellar.org/horizon-errors/transaction_failed', title: 'Transaction Failed', status: 400, - detail: 'The transaction failed when submitted to the stellar network. + detail: 'The transaction failed when submitted to the Stellar network. The `extras.result_codes` field on this response contains further details. Descriptions of each code can be found at: https://developers.stellar.org/docs/data/apis/horizon/api-reference/errors/http-status-codes/horizon-specific/transaction-failed', diff --git a/docs/data/apis/horizon/admin-guide/monitoring.mdx b/docs/data/apis/horizon/admin-guide/monitoring.mdx index 47377bef08..a6e7930e97 100644 --- a/docs/data/apis/horizon/admin-guide/monitoring.mdx +++ b/docs/data/apis/horizon/admin-guide/monitoring.mdx @@ -23,8 +23,8 @@ There are numerous application metrics keys emitted by Horizon at runtime, encod - `go_`: golang specific runtime performance - `horizon_txsub_`: attributes of Horizon's transaction submission subsystem, if enabled. -- `horizon_stellar_core_`: runtime attributes of stellar network reported by the captive core. -- `horizon_order_book_`: runtime attributes of the in memory order book maintained by Horizon of the current stellar network +- `horizon_stellar_core_`: runtime attributes of the Stellar network reported by the captive core. +- `horizon_order_book_`: runtime attributes of the in-memory order book maintained by Horizon for the current Stellar network - `horizon_log_`: counters of how many log messages were printed at each severity level - `horizon_ingest_`: performance measurements and stateful aspects of Horizon's internal ingestion subsystem - `horizon_http_`: statistics and measurements of Horizon's HTTP API service, all aspects of request/response load and timings.
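The prefixes in the list above are what a scraper would filter on when reading Horizon's metrics feed. A minimal sketch (not part of this diff), assuming Horizon's admin HTTP endpoint is enabled and serving Prometheus text, with the URL below as a placeholder:

```typescript
// Group Horizon's Prometheus metrics output by the key prefixes documented above.
// ADMIN_URL is a placeholder; point it at your Horizon admin /metrics endpoint.
const ADMIN_URL = "http://localhost:6060/metrics";

const PREFIXES = [
  "go_",
  "horizon_txsub_",
  "horizon_stellar_core_",
  "horizon_order_book_",
  "horizon_log_",
  "horizon_ingest_",
  "horizon_http_",
];

async function metricsByPrefix(): Promise<Map<string, string[]>> {
  const body = await (await fetch(ADMIN_URL)).text();
  const groups = new Map<string, string[]>(
    PREFIXES.map((p): [string, string[]] => [p, []]),
  );
  for (const line of body.split("\n")) {
    // Skip Prometheus comment/TYPE lines and blanks; keep only samples.
    if (line.startsWith("#") || line.trim() === "") continue;
    const prefix = PREFIXES.find((p) => line.startsWith(p));
    if (prefix) groups.get(prefix)!.push(line);
  }
  return groups;
}

metricsByPrefix().then((groups) => {
  for (const [prefix, lines] of groups) console.log(prefix, lines.length);
});
```

Runs under Node 18+ (which ships a global `fetch`); each group maps one-to-one onto a bullet in the list above, so a dashboard panel per prefix falls out directly.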
diff --git a/docs/data/apis/horizon/api-reference/errors/http-status-codes/horizon-specific/before-history.mdx b/docs/data/apis/horizon/api-reference/errors/http-status-codes/horizon-specific/before-history.mdx index 949e1a91ea..ac31fc9ed1 100644 --- a/docs/data/apis/horizon/api-reference/errors/http-status-codes/horizon-specific/before-history.mdx +++ b/docs/data/apis/horizon/api-reference/errors/http-status-codes/horizon-specific/before-history.mdx @@ -14,7 +14,7 @@ The `before_history` error returns a [`410` error code](https://developer.mozill "type": "https://stellar.org/horizon-errors/before_history", "title": "Data Requested Is Before Recorded History", "status": 410, - "detail": "This horizon instance is configured to only track a portion of the stellar network's latest history. This request is asking for results prior to the recorded history known to this horizon instance." + "detail": "This horizon instance is configured to only track a portion of the Stellar network's latest history. This request is asking for results prior to the recorded history known to this horizon instance." } ``` diff --git a/docs/data/apis/horizon/api-reference/errors/http-status-codes/horizon-specific/transaction-failed.mdx b/docs/data/apis/horizon/api-reference/errors/http-status-codes/horizon-specific/transaction-failed.mdx index 0b77401d1f..f31621b5b2 100644 --- a/docs/data/apis/horizon/api-reference/errors/http-status-codes/horizon-specific/transaction-failed.mdx +++ b/docs/data/apis/horizon/api-reference/errors/http-status-codes/horizon-specific/transaction-failed.mdx @@ -20,7 +20,7 @@ In almost every case, this error indicates that the transaction submitted in the "type": "https://stellar.org/horizon-errors/transaction_failed", "title": "Transaction Failed", "status": 400, - "detail": "The transaction failed when submitted to the stellar network. The `extras.result_codes` field on this response contains further details. Descriptions of each code can be found at: https://www.stellar.org/developers/guides/concepts/list-of-operations.html", + "detail": "The transaction failed when submitted to the Stellar network. The `extras.result_codes` field on this response contains further details.
Descriptions of each code can be found at: https://www.stellar.org/developers/guides/concepts/list-of-operations.html", "extras": { "envelope_xdr": "AAAAAgAAAADdfhHDs4Vaug6p8Oxb1QRjNRdJt3pYKKBVhFHrEgd9QAAAAAoAEi4YAAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACwAAAAAAAAAAAAAAAAAAAAESB31AAAAAQFhc/liVXbLk3NtB2BtweFJ064JdDIfrTSrqKMhb1oIRK+0PSyvjzZTkRCJmQY3bHNXYNuepa2TF7aBdibrb1gI=", "result_codes": { diff --git a/docs/data/apis/horizon/api-reference/errors/response.mdx b/docs/data/apis/horizon/api-reference/errors/response.mdx index a37ce06ef7..6d1668b09e 100644 --- a/docs/data/apis/horizon/api-reference/errors/response.mdx +++ b/docs/data/apis/horizon/api-reference/errors/response.mdx @@ -47,7 +47,7 @@ When any error occurs, Horizon responds with a JSON document with the below attr "type": "https://stellar.org/horizon-errors/transaction_failed", "title": "Transaction Failed", "status": 400, - "detail": "The transaction failed when submitted to the stellar network. The `extras.result_codes` field on this response contains further details. Descriptions of each code can be found at: https://www.stellar.org/developers/learn/concepts/list-of-operations.html", + "detail": "The transaction failed when submitted to the Stellar network. The `extras.result_codes` field on this response contains further details. Descriptions of each code can be found at: https://www.stellar.org/developers/learn/concepts/list-of-operations.html", "extras": { "envelope_xdr": "AAAAANPRjCD1iCti3hovsrrz6aSAjmp263grVr6+mI3SQSkcAAAAZAAPRLgAAAADAAAAAAAAAAAAAAABAAAAAQAAAACuSL9OciKkFztj4d3zuadl20HHObu+7qJenBxHPrMayQAAAAUAAAABAAAAANPRjCD1iCti3hovsrrz6aSAjmp263grVr6+mI3SQSkcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtJBKRwAAABA1N0iqDAgqS6+3RIZGoNB9OXrY3wd/nLruXYi+eiTt4jn94fLVLwAw6jJCaK+qxStwO7c4kP6u5k0RPbuYC55CT6zGskAAABAiUGCNCS4pGlfcRmi82kbralzcFlTQAFzLyfUrYGn3RtQ4p/7TUwAqIanVoWGfEqzIJo64ZT+mYtJ72BfI+FiDg==", "result_codes": { diff --git a/docs/data/apis/horizon/api-reference/errors/result-codes/operations.mdx b/docs/data/apis/horizon/api-reference/errors/result-codes/operations.mdx index 9315203a5d..6caab52f9e 100644 --- a/docs/data/apis/horizon/api-reference/errors/result-codes/operations.mdx +++ b/docs/data/apis/horizon/api-reference/errors/result-codes/operations.mdx @@ -38,7 +38,7 @@ These are Result Codes that communicate success (200) or failure (400) at the op "type": "https://stellar.org/horizon-errors/transaction_failed", "title": "Transaction Failed", "status": 400, - "detail": "The transaction failed when submitted to the stellar network. The `extras.result_codes` field on this response contains further details. Descriptions of each code can be found at: https://www.stellar.org/developers/learn/concepts/list-of-operations.html", + "detail": "The transaction failed when submitted to the Stellar network. The `extras.result_codes` field on this response contains further details. 
Descriptions of each code can be found at: https://www.stellar.org/developers/learn/concepts/list-of-operations.html", "extras": { "envelope_xdr": "AAAAANPRjCD1iCti3hovsrrz6aSAjmp263grVr6+mI3SQSkcAAAAZAAPRLgAAAADAAAAAAAAAAAAAAABAAAAAQAAAACuSL9OciKkFztj4d3zuadl20HHObu+7qJenBxHPrMayQAAAAUAAAABAAAAANPRjCD1iCti3hovsrrz6aSAjmp263grVr6+mI3SQSkcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtJBKRwAAABA1N0iqDAgqS6+3RIZGoNB9OXrY3wd/nLruXYi+eiTt4jn94fLVLwAw6jJCaK+qxStwO7c4kP6u5k0RPbuYC55CT6zGskAAABAiUGCNCS4pGlfcRmi82kbralzcFlTQAFzLyfUrYGn3RtQ4p/7TUwAqIanVoWGfEqzIJo64ZT+mYtJ72BfI+FiDg==", "result_codes": { diff --git a/docs/data/apis/horizon/api-reference/errors/result-codes/transactions.mdx b/docs/data/apis/horizon/api-reference/errors/result-codes/transactions.mdx index 242f6c0376..7adb63d757 100644 --- a/docs/data/apis/horizon/api-reference/errors/result-codes/transactions.mdx +++ b/docs/data/apis/horizon/api-reference/errors/result-codes/transactions.mdx @@ -56,7 +56,7 @@ These are Result Codes that communicate success (200) or failure (400) at the tr "type": "https://stellar.org/horizon-errors/transaction_failed", "title": "Transaction Failed", "status": 400, - "detail": "The transaction failed when submitted to the stellar network. The `extras.result_codes` field on this response contains further details. Descriptions of each code can be found at: https://www.stellar.org/developers/learn/concepts/list-of-operations.html", + "detail": "The transaction failed when submitted to the Stellar network. The `extras.result_codes` field on this response contains further details. Descriptions of each code can be found at: https://www.stellar.org/developers/learn/concepts/list-of-operations.html", "extras": { "envelope_xdr": "AAAAANPRjCD1iCti3hovsrrz6aSAjmp263grVr6+mI3SQSkcAAAAZAAPRLgAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArki/TnIipBc7Y+Hd87mnZdtBxzm7vu6iXpwcRz6zGskAAAAAAAAAAAAHoSAAAAAAAAAAAdJBKRwAAABANWeKuRYFmBm1lrMQqMvhbSouwL270SnxcTtv1XI4Y+uVe4yw4Jq7/43EoxwLbRh/pC3V4WfOZRzDqwsTyEztAA==", "result_codes": { diff --git a/docs/data/indexers/build-your-own/ingest-sdk/README.mdx b/docs/data/indexers/build-your-own/ingest-sdk/README.mdx index 23a8f08a72..d1486e0f22 100644 --- a/docs/data/indexers/build-your-own/ingest-sdk/README.mdx +++ b/docs/data/indexers/build-your-own/ingest-sdk/README.mdx @@ -20,7 +20,7 @@ Obtain the final state of [ledger entries](../../../../learn/fundamentals/stella Ledger entries are cryptographically signed as part of each ledger and therefore represent the trusted, cumulative state at a point in time for [assets](../../../../learn/fundamentals/stellar-data-structures/assets.mdx) related to an [account](../../../../learn/fundamentals/stellar-data-structures/accounts.mdx) or [contract](../../../../learn/fundamentals/contract-development/storage/persisting-data.mdx). 
Examples of asset types: - trustlines which hold token balances -- offers which hold bid and asks on the [stellar network DEX](../../../../learn/fundamentals/liquidity-on-stellar-sdex-liquidity-pools.mdx#sdex) +- offers which hold bids and asks on the [Stellar DEX](../../../../learn/fundamentals/liquidity-on-stellar-sdex-liquidity-pools.mdx#sdex) - contract data which holds key/value stores for contracts ### Ledger Metadata diff --git a/docs/platforms/anchor-platform/api-reference/platform/rpc/anchor-platform.openrpc.json b/docs/platforms/anchor-platform/api-reference/platform/rpc/anchor-platform.openrpc.json index 8fa9f9e324..db64091b51 100644 --- a/docs/platforms/anchor-platform/api-reference/platform/rpc/anchor-platform.openrpc.json +++ b/docs/platforms/anchor-platform/api-reference/platform/rpc/anchor-platform.openrpc.json @@ -27,7 +27,7 @@ { "name": "do_stellar_payment", "summary": "Submits a Stellar payment", - "description": "Submits a payment to a stellar network by a custody service.", + "description": "Submits a payment to the Stellar network by a custody service.", "paramStructure": "by-name", "tags": [ { @@ -386,7 +386,7 @@ { "name": "do_stellar_refund", "summary": "Submits a Stellar refund", - "description": "Submits a refund payment to a stellar network by a custody service", + "description": "Submits a refund payment to the Stellar network by a custody service", "paramStructure": "by-name", "tags": [ { diff --git a/docs/platforms/anchor-platform/sep-guide/sep24/example.mdx b/docs/platforms/anchor-platform/sep-guide/sep24/example.mdx index 3ab52fa259..e8959fda65 100644 --- a/docs/platforms/anchor-platform/sep-guide/sep24/example.mdx +++ b/docs/platforms/anchor-platform/sep-guide/sep24/example.mdx @@ -138,7 +138,7 @@ const sessions = {}; * Create an authenticated session for the user. * * Return a session token to be used in future requests as well as the - * user data. Note that you may not have a user for the stellar account + * user data. Note that you may not have a user for the Stellar account * provided, in which case the user should go through your onboarding * process. */ diff --git a/docs/tools/cli/cookbook/stellar-keys.mdx b/docs/tools/cli/cookbook/stellar-keys.mdx index 36d2dc88f6..cd41446cad 100644 --- a/docs/tools/cli/cookbook/stellar-keys.mdx +++ b/docs/tools/cli/cookbook/stellar-keys.mdx @@ -1,7 +1,7 @@ --- title: Stellar Keys hide_table_of_contents: true -description: Manage stellar keys +description: Manage Stellar keys --- Stub file: the real file is generated at build time by `yarn stellar-cli:build`. diff --git a/docs/tools/ramps/moneygram.mdx b/docs/tools/ramps/moneygram.mdx index e64501b0ac..f9dda8ea6d 100644 --- a/docs/tools/ramps/moneygram.mdx +++ b/docs/tools/ramps/moneygram.mdx @@ -6,4 +6,4 @@ sidebar_position: 10 MoneyGram Ramps is a MoneyGram product that enables users of third-party applications, such as crypto wallets and exchanges, to cash-in (deposit) and cash-out (withdrawal) USDC on Stellar. -[Dive into the MoneyGram Ramps docs](https://developer.moneygram.com/moneygram-developer/docs/integrate-moneygram-ramps) to learn about the technical requirements for integrating MoneyGram Ramps into an existing wallet or creating a new wallet application. +Dive into the [MoneyGram Ramps docs](https://developer.moneygram.com/moneygram-developer/docs/integrate-moneygram-ramps) to learn about the technical requirements for integrating MoneyGram Ramps into an existing wallet or creating a new wallet application.
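Several hunks above touch the same `transaction_failed` problem document, whose shape is stable across the error pages: `type`, `title`, `status`, `detail`, and an `extras` object carrying `envelope_xdr` and `result_codes`. A minimal client-side sketch (not part of this diff) of surfacing those codes, assuming a signed base64 envelope XDR is already in hand and using Horizon's documented `POST /transactions` submission endpoint; the Horizon URL is a placeholder:

```typescript
// Submit a signed envelope XDR to Horizon and surface extras.result_codes
// from the transaction_failed problem document shown in the diffs above.
const HORIZON_URL = "https://horizon-testnet.stellar.org"; // placeholder

interface HorizonProblem {
  type: string; // e.g. "https://stellar.org/horizon-errors/transaction_failed"
  title: string;
  status: number; // 400 for transaction_failed
  detail: string;
  extras?: {
    envelope_xdr?: string;
    result_codes?: { transaction?: string; operations?: string[] };
  };
}

async function submitTransaction(envelopeXdr: string): Promise<unknown> {
  const res = await fetch(`${HORIZON_URL}/transactions`, {
    method: "POST",
    headers: { "Content-Type": "application/x-www-form-urlencoded" },
    body: new URLSearchParams({ tx: envelopeXdr }),
  });
  if (res.ok) return res.json();
  const problem = (await res.json()) as HorizonProblem;
  // result_codes explain why the network rejected the transaction; the error
  // pages edited above document what each code means.
  throw new Error(
    `${problem.title} (${problem.status}): ` +
      JSON.stringify(problem.extras?.result_codes ?? {}),
  );
}
```

The SDKs wrap this response for you; the sketch only exists to show where `extras.result_codes` lives in the raw problem document.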
diff --git a/docs/validators/admin-guide/installation.mdx b/docs/validators/admin-guide/installation.mdx index 9e59b92406..aa2b01eb04 100644 --- a/docs/validators/admin-guide/installation.mdx +++ b/docs/validators/admin-guide/installation.mdx @@ -53,7 +53,7 @@ We publish multiple packages for convenience. | stellar-core | none | installs stellar-core binary, systemd service, logrotate script, documentation | | stellar-core-utils | none | installs useful command line tools (stellar-core-cmd, stellar-core-gap-detect) | | stellar-core-prometheus-exporter | none | installs a Prometheus exporter to facilitate ingesting stellar-core metrics | -| stellar-core-postgres | stellar-core, PostgreSQL | configures a PostgreSQL server, creates a stellar db,role and system user, the default stellar-core configuration contained in this package will connect to the Testnet | +| stellar-core-postgres | stellar-core, PostgreSQL | configures a PostgreSQL server, creates a Stellar DB, role, and system user; the default stellar-core configuration contained in this package will connect to the Testnet | | stellar-archivist | none | installs stellar-archivist cli tool for managing stellar-core History archives | To install a chosen package run: diff --git a/docs/validators/admin-guide/publishing-history-archives.mdx b/docs/validators/admin-guide/publishing-history-archives.mdx index f40cca66c5..7c0b2ba221 100644 --- a/docs/validators/admin-guide/publishing-history-archives.mdx +++ b/docs/validators/admin-guide/publishing-history-archives.mdx @@ -282,7 +282,7 @@ stellar-archivist scan file:///mnt/xvdf/stellar-core-archive/node_001 2019/04/25 12:15:41 No missing buckets referenced in range [0x0000003f, 0x000041bf] ``` -Finally, you can start your stellar Core instance once again. +Finally, you can start your Stellar Core instance once again.
```bash systemctl start stellar-core diff --git a/docusaurus.config.ts b/docusaurus.config.ts index 3f7570a17f..c4d1c14ceb 100644 --- a/docusaurus.config.ts +++ b/docusaurus.config.ts @@ -100,7 +100,7 @@ const config: Config = { "classic", { blog: { - path: 'meeting-notes', + path: 'meetings', blogTitle: 'Meeting Notes', blogDescription: 'Notes and recordings from the Stellar protocol & developers meetings', blogSidebarTitle: 'All meetings', @@ -108,6 +108,9 @@ const config: Config = { postsPerPage: 12, routeBasePath: 'meetings', onUntruncatedBlogPosts: 'ignore', + showReadingTime: false, + authorsMapPath: 'authors.yml', + exclude: ['**/README.md'], }, docs: { showLastUpdateTime: true, diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-01-18.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-01-18.mdx index 20e613c5d5..d759ffe8a4 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-01-18.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-01-18.mdx @@ -1,6 +1,6 @@ --- title: 2024-01-18 -authors: naman +authors: naman-kumar tags: - protocol --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-02-01.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-02-01.mdx index 59d504a616..f9c8e9c25d 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-02-01.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-02-01.mdx @@ -1,6 +1,6 @@ --- title: 2024-02-01 -authors: naman +authors: naman-kumar tags: - protocol --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-02-09.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-02-09.mdx index b1e8bbe5ab..1b4247e79d 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-02-09.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-02-09.mdx @@ -5,12 +5,9 @@ tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Hilo de agenda de Discord](https://discord.com/channels/897514728459468821/1204462856037470248) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-02-15.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-02-15.mdx index 5a2ae63022..71f6d1fe05 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-02-15.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-02-15.mdx @@ -1,16 +1,13 @@ --- title: 2024-02-15 -authors: naman +authors: naman-kumar tags: - protocol --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Discord agenda thread](https://discord.com/channels/897514728459468821/1207385360116490360) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-02-22.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-02-22.mdx index f5a327c121..4015cf133e 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-02-22.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-02-22.mdx @@ -5,12 +5,9 @@ tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Hilo de la agenda de Discord](https://discord.com/channels/897514728459468821/1209582245824823337) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-02-29.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-02-29.mdx index f1bd4a8efd..4e44574d91 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-02-29.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-02-29.mdx @@ -1,16 +1,13 @@ --- title: 2024-02-29 -authors: naman +authors: naman-kumar tags: - protocol --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Hilo de agenda en Discord](https://discord.com/channels/897514728459468821/1212118102565855243) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-03-07.mdx 
b/i18n/es/docusaurus-plugin-content-blog/2024-03-07.mdx index a6977f3eed..afa3ef6fe9 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-03-07.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-03-07.mdx @@ -5,12 +5,9 @@ tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Hilo de agenda de Discord](https://discord.com/channels/897514728459468821/911254664576643122/1215404506964172890) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-03-14.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-03-14.mdx index da1f078113..15e2b99272 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-03-14.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-03-14.mdx @@ -1,16 +1,13 @@ --- title: 2024-03-14 -authors: naman +authors: naman-kumar tags: - protocol --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Discord agenda thread](https://discord.com/channels/897514728459468821/1217193723612368926) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-03-21.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-03-21.mdx index 064c9d1059..7bb6672d8c 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-03-21.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-03-21.mdx @@ -5,12 +5,9 @@ tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Hilo de agenda de Discord](https://discord.com/channels/897514728459468821/1219381314931917000) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-03-28.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-03-28.mdx index 34ae91d52c..b9e3a47310 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-03-28.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-03-28.mdx @@ -1,16 +1,13 @@ --- title: 2024-03-28 -authors: naman +authors: naman-kumar tags: - protocol --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Agenda thread](https://github.com/stellar/stellar-protocol/discussions/1475) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-04-04.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-04-04.mdx index d4caba9d80..ce5147055e 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-04-04.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-04-04.mdx @@ -1,29 +1,21 @@ --- title: 2024-04-04 -authors: naman +authors: naman-kumar tags: - developer --- +import DriveVideo from "@site/src/components/DriveVideo"; + La grabación de hoy tiene dos partes. Los primeros 12 minutos son solo audio. Los próximos 45 minutos también tienen video. Ten en cuenta que las diapositivas se compartieron en el chat de discord mediante la pantalla compartida, debido a dificultades técnicas. Parte 1 (solo audio): - + Parte 2 (video): - + [Hilo de Agenda de Discord](https://discord.com/channels/897514728459468821/1224408179363024918) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-04-11.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-04-11.mdx index 404a5638ae..2b11d233cc 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-04-11.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-04-11.mdx @@ -1,16 +1,13 @@ --- title: 2024-04-11 -authors: naman +authors: naman-kumar tags: - protocol --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + Piyal de Freighter discutió la propuesta para estandarizar la interfaz de billetera. Los puntos clave de la discusión se capturan a continuación. Para notas completas, por favor ve la grabación; y también consulta la propuesta y la publicación en las discusiones de github. 
diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-04-18.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-04-18.mdx index 481ecb0618..5630818af5 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-04-18.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-04-18.mdx @@ -5,12 +5,9 @@ tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Discord agenda thread](https://discord.com/channels/897514728459468821/911254664576643122/1215404506964172890) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-04-25.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-04-25.mdx index 651cfe9bbe..e33e1c29f8 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-04-25.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-04-25.mdx @@ -1,16 +1,13 @@ --- title: 2024-04-25 -authors: naman +authors: naman-kumar tags: - protocol --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + 1. Garand discussed changes to the State Archival proposal based on feedback received at Meridian 2023. Los cambios propuestos son: diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-05-02.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-05-02.mdx index 5ce9abb6f1..6534c0d076 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-05-02.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-05-02.mdx @@ -1,16 +1,13 @@ --- title: 2024-05-02 -authors: naman +authors: naman-kumar tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Hilo de Agenda de Discord](https://discord.com/channels/897514728459468821/1234887262530048010/1234887262530048010) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-05-09.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-05-09.mdx index f931d0b705..14264e6c86 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-05-09.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-05-09.mdx @@ -1,16 +1,13 @@ --- title: 2024-05-09 -authors: naman +authors: naman-kumar tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + 1. Tyler desarrolló una aplicación de votación utilizando claves de acceso para firmar la transacción, que es una implementación de la función de verificación secp256r1. 2. Mostró una implementación multiplataforma (web y móvil) y demostró que las claves de acceso son la interfaz perfecta entre los contratos web3 y los mecanismos de autenticación web2 a los que la mayoría de los usuarios finales están acostumbrados. diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-06-13.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-06-13.mdx index 02684ad202..b235248a85 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-06-13.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-06-13.mdx @@ -5,12 +5,9 @@ tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + 1. Tyler creó Super Peach, una aplicación web3 que utiliza claves de acceso para firmar transacciones. Demostró cómo se pueden usar las claves de acceso en flujos de autorización y cómo se pueden usar para firmar transacciones. 
- Código: https://github.com/kalepail/superpeach diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-06-20.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-06-20.mdx index 0498aef6bd..d0a4ab8b6c 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-06-20.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-06-20.mdx @@ -1,16 +1,13 @@ --- title: 2024-06-20 -authors: naman +authors: naman-kumar tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + 1. [Kirckz discute Meru](https://docs.google.com/presentation/d/1Fu4AkB0mrvOkK6UDFJHgKwCV-Ul4JRF-xPqTYJ3CQqw), una aplicación de servicios financieros para freelancers y trabajadores remotos en América Latina. 2. Él comparte su experiencia integrando Meru con Blend, un protocolo de liquidez primitiva para Stellar. diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-06-27.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-06-27.mdx index ecb2b316e2..e0decaa5ee 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-06-27.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-06-27.mdx @@ -1,16 +1,13 @@ --- title: 2024-06-27 -authors: naman +authors: naman-kumar tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + 1. [Chad](https://github.com/chadoh) y [Willem](https://github.com/willemneal) de [Aha Labs](https://github.com/AhaLabs) discuten las actualizaciones del nuevo y mejorado [stellar-cli](https://github.com/stellar/stellar-cli) 2. Algunos aspectos destacados incluyen el cambio del nombre 'soroban' a 'stellar' al usar la herramienta CLI y la adición de nuevos comandos. diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-07-11.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-07-11.mdx index 886abc9aa6..24141f2e45 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-07-11.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-07-11.mdx @@ -1,16 +1,13 @@ --- title: 2024-07-11 -authors: naman +authors: naman-kumar tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + 1. 2. [Las diapositivas](https://docs.google.com/presentation/d/1QsCwFLFcDF4RmNIwtSSnNrUfZb0RM0kLxOOxC7ENY5M/edit#slide=id.g2cb5821e4de_1_1143) están disponibles públicamente y son legibles de forma asíncrona. 2. 4. Comparte tus consultas y publica preguntas en #hubble en discord de Stellar, que es un canal dedicado a temas relacionados con datos. diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-07-18.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-07-18.mdx index b2d378f6ab..25b36c0d16 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-07-18.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-07-18.mdx @@ -1,16 +1,13 @@ --- title: 2024-07-18 -authors: naman +authors: naman-kumar tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + Nota: la primera parte de la llamada se perdió. El video publicado arriba captura la segunda mitad de la llamada donde varios desarrolladores del ecosistema compartieron sus casos de uso y necesidades para una billetera inteligente en Stellar. 
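The meeting-notes hunks above (and several that follow) all make the same substitution: a raw Google Drive iframe becomes a shared `DriveVideo` component imported from `@site/src/components/DriveVideo`. The component's actual source is not part of this diff; the sketch below is a hypothetical stand-in showing the likely shape, with the `id` prop name and iframe attributes as assumptions:

```tsx
// Hypothetical stand-in for @site/src/components/DriveVideo (not the real source).
import React from "react";

interface DriveVideoProps {
  id: string; // Google Drive file id (assumed prop name)
}

export default function DriveVideo({ id }: DriveVideoProps) {
  // Drive exposes an embeddable player for a file at /file/d/<id>/preview.
  return (
    <iframe
      src={`https://drive.google.com/file/d/${id}/preview`}
      width="100%"
      height="480"
      allow="autoplay; encrypted-media"
      title="Meeting recording"
    />
  );
}
```

Centralizing the embed this way is what lets each post drop its per-file iframe markup and keep one import line plus a single `<DriveVideo id="..." />` call.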
diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-07-25.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-07-25.mdx index b5f086716d..394cc22892 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-07-25.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-07-25.mdx @@ -1,16 +1,13 @@ --- title: 2024-07-25 -authors: naman +authors: naman-kumar tags: - protocol --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + Un desarrollador principal, Dima, discutió la propuesta de añadir soporte para constructores a Soroban, el sistema de contratos inteligentes de Stellar. diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-08-01.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-08-01.mdx index 577f484f1e..8719c40812 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-08-01.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-08-01.mdx @@ -1,16 +1,13 @@ --- title: 2024-08-01 -authors: naman +authors: naman-kumar tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + 1. Piyal demostró que la funcionalidad de intercambio de Freighter ahora se sirve a través de [Soroswap](https://soroswap.finance/). Anteriormente, se servía a través de Stellar Dex. 2. Freighter ha hecho disponibles las instrucciones de integración [aquí](https://github.com/stellar/freighter/blob/d248f2ad0aa03da72ea6eeaf7907ac0454fdcc72/extension/INTEGRATING_SOROSWAP.MD?plain=1#L2). diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-08-08.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-08-08.mdx index de73b96938..9f68e1a9f4 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-08-08.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-08-08.mdx @@ -1,15 +1,12 @@ --- title: 2024-08-12 -authors: naman +authors: naman-kumar tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + 1. Tdep discutió sobre Zephyr, un entorno de ejecución desarrollada sobre el indexador Mercury. También explicó ejemplos que demuestran cómo Zephyr puede simplificar el desarrollo de dapps. diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-08-15.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-08-15.mdx index 86fa9355b7..e67a616d7e 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-08-15.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-08-15.mdx @@ -1,16 +1,13 @@ --- title: 2024-08-15 -authors: julian +authors: julian-martinez tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + 1. 
@Soiled y @Robeart de Orbit hablaron sobre usar Blend para crear stablecoins descentralizadas para todas las monedas bajo el Protocolo Orbit, utilizando un pegkeeper descentralizado para mantener su precio y aprovechando estas stablecoins y billeteras inteligentes para crear un exchange perpetuo sin libro de órdenes, llevando Forex a Stellar diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-08-22.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-08-22.mdx index 46b3c0fe1e..0683ab5d61 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-08-22.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-08-22.mdx @@ -1,16 +1,13 @@ --- title: 2024-08-23 -authors: naman +authors: naman-kumar tags: - protocol --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + [Discord agenda thread](https://discord.com/channels/897514728459468821/900374272751591424/1275577430043525204) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-08-29.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-08-29.mdx index dbec1aba0f..64a4d96b34 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-08-29.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-08-29.mdx @@ -1,16 +1,13 @@ --- title: 2024-08-29 -authors: naman +authors: naman-kumar tags: - protocol --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1278045556211716171) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-09-05.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-09-05.mdx index fa8ddb9b57..9849b60103 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-09-05.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-09-05.mdx @@ -1,16 +1,13 @@ --- title: 2024-09-05 -authors: anataliocs +authors: chris-anatalio tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1280678171053789317) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-09-12.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-09-12.mdx index f835ee57b1..7807a39c5f 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-09-12.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-09-12.mdx @@ -1,16 +1,13 @@ --- title: 2024-09-12 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + Agenda: [Hilo de Discord](https://discord.com/channels/897514728459468821/900374272751591424/1282934024892973077) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-09-19.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-09-19.mdx index d1b7e63988..733c38b4a6 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-09-19.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-09-19.mdx @@ -1,16 +1,13 @@ --- title: 2024-09-19 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + Agenda: [hilo de Discord](https://discord.com/channels/897514728459468821/900374272751591424/1285627254130610297) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-09-26.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-09-26.mdx index b7dce413e5..584d44505f 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-09-26.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-09-26.mdx @@ -1,16 +1,13 @@ --- title: 2024-09-26 -authors: anataliocs +authors: chris-anatalio tags: - developer --- - 
+import DriveVideo from "@site/src/components/DriveVideo"; + + Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1288890126038208532) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-10-24.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-10-24.mdx index 37d2a36fb0..62a447916b 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-10-24.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-10-24.mdx @@ -1,16 +1,13 @@ --- title: 2024-10-24 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1298362698123182080) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-11-14.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-11-14.mdx index 36ad5ccc65..cfffdcf906 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-11-14.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-11-14.mdx @@ -1,16 +1,13 @@ --- title: 2024-11-14 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + Agenda: [Discord thread](https://discord.com/events/897514728459468821/1304859059425382553/1306725344870400000) diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-12-05.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-12-05.mdx index 69bb56e4fa..4fbbe12dc6 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-12-05.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-12-05.mdx @@ -1,6 +1,6 @@ --- title: 2024-12-05 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-12-12.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-12-12.mdx index 9ac88bf1f6..a96a624e1a 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-12-12.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-12-12.mdx @@ -1,6 +1,6 @@ --- title: 12 de diciembre de 2024 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2024-12-19.mdx b/i18n/es/docusaurus-plugin-content-blog/2024-12-19.mdx index 8af70bc77d..fec2691759 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2024-12-19.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2024-12-19.mdx @@ -1,6 +1,6 @@ --- title: 2024-12-19 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-01-16.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-01-16.mdx index 0176940e53..b28bb88026 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-01-16.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-01-16.mdx @@ -1,6 +1,6 @@ --- title: 2025-01-16 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-01-23.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-01-23.mdx index 92d42c409a..ebf290b44b 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-01-23.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-01-23.mdx @@ -1,6 +1,6 @@ --- title: 23 de enero de 2025 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-01-30.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-01-30.mdx index 28396ef7b2..204bb3f3b5 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-01-30.mdx +++ 
b/i18n/es/docusaurus-plugin-content-blog/2025-01-30.mdx @@ -1,6 +1,6 @@ --- title: 30-01-2025 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-02-06.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-02-06.mdx index bc6d51b2bb..67d683348a 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-02-06.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-02-06.mdx @@ -1,6 +1,6 @@ --- title: 2025-02-06 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-02-13.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-02-13.mdx index 68bf00e784..098a0857c4 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-02-13.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-02-13.mdx @@ -1,6 +1,6 @@ --- title: 2025-02-13 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-02-20.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-02-20.mdx index 3778b17e15..363be710c1 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-02-20.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-02-20.mdx @@ -1,6 +1,6 @@ --- title: 2025-02-20 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-02-27.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-02-27.mdx index b4eb1c021d..5c7fd3fa67 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-02-27.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-02-27.mdx @@ -1,6 +1,6 @@ --- title: 2025-02-27 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-03-06.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-03-06.mdx index 615712afa2..9234f8599b 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-03-06.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-03-06.mdx @@ -1,6 +1,6 @@ --- title: 2025-03-06 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-03-27.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-03-27.mdx index 17c9add7fc..ac0aab9303 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-03-27.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-03-27.mdx @@ -1,6 +1,6 @@ --- title: 2025-03-27 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-04-03.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-04-03.mdx index 57ce60f8db..322295ed76 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-04-03.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-04-03.mdx @@ -1,6 +1,6 @@ --- title: 2025-04-03 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-04-10.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-04-10.mdx index 89a65351af..0a9fa08c8c 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-04-10.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-04-10.mdx @@ -1,6 +1,6 @@ --- title: 2025-04-10 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- @@ -9,7 +9,7 @@ import YouTube from "@site/src/components/YouTube"; -Pamphile (más conocido como tupui) es un ingeniero de software senior que trabaja en Bitpanda. 
Anteriormente trabajó en el equipo que desarrolló Flight Simulator 2020 y creó una empresa de consultoría para trabajar en software de código abierto. +Pamphile (más conocido como Tupui) es un ingeniero de software senior que trabaja en Bitpanda. Anteriormente trabajó en el equipo que desarrolló Flight Simulator 2020 y creó una empresa de consultoría para trabajar en software de código abierto. Ahora está desarrollando su proyecto llamado Tansu en la blockchain Stellar. Tansu es un sistema descentralizado que busca asegurar la cadena de suministro de software y romper las comunidades aisladas mediante el uso de una DAO. diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-04-17.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-04-17.mdx index c003d2db03..3b63ed497e 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-04-17.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-04-17.mdx @@ -1,6 +1,6 @@ --- title: 2025-04-17 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-05-01.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-05-01.mdx index 50774f255a..05107717c2 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-05-01.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-05-01.mdx @@ -1,6 +1,6 @@ --- title: 2025-05-01 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-05-22.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-05-22.mdx index fd5ab1cbe4..0b6f22a2af 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-05-22.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-05-22.mdx @@ -1,6 +1,6 @@ --- title: 2025-05-22 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-07-10.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-07-10.mdx index 8c83b574ed..705e02e3a2 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-07-10.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-07-10.mdx @@ -1,6 +1,6 @@ --- title: 2025-07-10 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-07-17.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-07-17.mdx index 42d7672d81..b44eb958a9 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-07-17.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-07-17.mdx @@ -1,6 +1,6 @@ --- title: 2025-07-17 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-07-24.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-07-24.mdx index 7de0a312be..8e2690b8ff 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-07-24.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-07-24.mdx @@ -1,6 +1,6 @@ --- title: 2025-07-24 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-08-07.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-08-07.mdx index ef66629753..2f0067097c 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-08-07.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-08-07.mdx @@ -1,6 +1,6 @@ --- title: 2025-08-07 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-09-25.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-09-25.mdx index 8d9a050ea0..774a65ff0c 100644 --- 
a/i18n/es/docusaurus-plugin-content-blog/2025-09-25.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-09-25.mdx @@ -1,6 +1,6 @@ --- title: 2025-09-25 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-10-02.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-10-02.mdx index 6b3117566b..c4a05835b3 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-10-02.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-10-02.mdx @@ -1,6 +1,6 @@ --- title: 2025-10-02 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-10-09.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-10-09.mdx index 2901f84790..0fb8cbe829 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-10-09.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-10-09.mdx @@ -1,6 +1,6 @@ --- title: 2025-10-09 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-10-16.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-10-16.mdx index 00596a619b..29832a3e1e 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-10-16.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-10-16.mdx @@ -1,6 +1,6 @@ --- title: 2025-10-16 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-10-23.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-10-23.mdx index 4b13b29135..dfd1260e7b 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-10-23.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-10-23.mdx @@ -1,6 +1,6 @@ --- title: 2025-10-23 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-10-30.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-10-30.mdx index dae4eab8de..e071d23dc3 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-10-30.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-10-30.mdx @@ -1,6 +1,6 @@ --- title: 2025-10-30 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/2025-11-06.mdx b/i18n/es/docusaurus-plugin-content-blog/2025-11-06.mdx index 3d5e6048ec..4d1e12834a 100644 --- a/i18n/es/docusaurus-plugin-content-blog/2025-11-06.mdx +++ b/i18n/es/docusaurus-plugin-content-blog/2025-11-06.mdx @@ -1,6 +1,6 @@ --- title: 2025-11-06 -authors: carstenjacobsen +authors: carsten-jacobsen tags: - developer --- diff --git a/i18n/es/docusaurus-plugin-content-blog/authors.yml b/i18n/es/docusaurus-plugin-content-blog/authors.yml deleted file mode 100644 index 88a9843e26..0000000000 --- a/i18n/es/docusaurus-plugin-content-blog/authors.yml +++ /dev/null @@ -1,51 +0,0 @@ -carstenjacobsen: - name: Carsten Jacobsen - title: Defensor Senior de Desarrolladores - url: https://github.com/carstenjacobsen - image_url: https://github.com/carstenjacobsen.png - page: true - socials: - github: carstenjacobsen - x: CarstenJacobsen - linkedin: carstenjacobsendk -elliotfriend: - name: Elliot Voris - title: Defensor Principal de Desarrolladores - url: https://github.com/ElliotFriend - image_url: https://github.com/ElliotFriend.png - page: true - socials: - github: ElliotFriend - x: ElliotFriend - linkedin: elliotfriend -kalepail: - name: Tyler van der Hoeven - title: Director Defensor de Desarrolladores - url: https://github.com/kalepail - image_url: https://github.com/kalepail.png - 
page: true - socials: - github: kalepail - x: kalepail - linkedin: tyvdh -anataliocs: - name: Chris Anatalio - title: Defensor Senior de Desarrolladores - url: https://github.com/anataliocs - image_url: https://github.com/anataliocs.png - socials: - github: anataliocs -naman: - name: Naman Kumar - title: Gerente de Producto - url: https://github.com/namankumar - image_url: https://github.com/namankumar.png - socials: - github: namankumar -julian: - name: Julian Martinez - title: Defensor Senior de Desarrolladores - url: https://github.com/Julian-dev28 - image_url: https://github.com/Julian-dev28.png - socials: - github: Julian-dev28 diff --git a/i18n/es/docusaurus-plugin-content-docs/current/meetings/2021-10-06.mdx b/i18n/es/docusaurus-plugin-content-docs/current/meetings/2021-10-06.mdx new file mode 100644 index 0000000000..96d12bfce2 --- /dev/null +++ b/i18n/es/docusaurus-plugin-content-docs/current/meetings/2021-10-06.mdx @@ -0,0 +1,137 @@ +--- +title: "Reimagina las Finanzas con Blockchain" +description: "Panel sobre fintech y blockchain en América Latina con voces de la industria." +authors: [roberto-durscki] +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +En este panel, representantes de Visa, Wire y startups fintech hablan sobre tendencias de innovación financiera en América Latina, incluyendo infraestructura, pagos digitales, cripto y servicios bancarios para pymes. Se discuten oportunidades de inversión, regulación y cómo la tecnología puede mejorar la experiencia del usuario, con consejos para emprendedores y una invitación al Startup Weekend blockchain en la región. +
+ Transcripción del Video + +[00:00] Y la vida. Ahora sí señores señoras. Buenos días buenas tardes estamos vivos colores yum y youtube. Entonces solo a. Hola a todos bienvenidos a nuestro panel de imagine o las finanzas con botstein apoyado por visa estelar yo soy roberto dulce y voy a mediar la sesión de hoy a compartiendo el palco conmigo están algunos amigos y expertos del mundo del intec y boateng y que iban a presentarse en un poquito a + +[01:00] Los vídeos mis amigos aquí se presenten en la orden y sus nombres y sus empresas así estarán en la presentación y que cuando sea su slide y hablen un poquito de tu rol tu empresa y arriba de todo como terminaron trabajando con fintech y blogging para poner un poquito de de contexto para para nuestros amigos en la llamada así. Bueno voy a empezar con conmigo para dar el ejemplo me llamó roberto acá en canadá en eeuu me llaman road en brasil vetó pero. Bueno creo que roberto por hoy está bien. Soy el señor director de business de volumen en la fundación estelar y muy sencillo para aquellos que no estén familiarizados con esta ley nosotros somos una de las primeras red del boxing que fue desarrollada específicamente para para pagos y pagos internacionales + +[02:00] Y las funcionalidades y las operaciones desde la zona no si usted es the news y con mi forma de alucinar y el del bolso. Bueno tuvimos un poquito más de música para nosotros siempre. Bueno pero regresen a la presentación estelar. Entonces una brocha para pagos nuestros focos están en este volcán es la organización de activos financieros y remesas internacionales más restan pendientes civiles y si las monedas digitales para para bancos centrales y sólo una idea nosotros habilitamos pagos internacionales 24 por 7 en la velocidad es unos cinco segundos nunca más que eso y el costo una fracción del centavo de dólar. Entonces una red muy muy eficiente para para pagos al respecto desde mi yo + +[03:00] Empecé mi carrera profesional como como un programador en el citibank inglés y tenemos hoy en el lado de una de las personas acá empezó la vida profesional conmigo marcelo ésta está en la audiencia hoy y después de eso hace como 15 años en múltiples bancos en la parte de proyectos de productos digitales el silver rabobank bici y fintech es algo más reciente para el show como 2017 después donde blocks game y se tornó mi pasión y bueno vamos a seguir a todo mi amigo con usted. Ahora muchas. Gracias roberto un placer estar con ustedes. Mi nombre es arnold dos reyes y el vicepresidente de alianzas digitales tintas y dice versos dentro de vista no hay vida somos la red internacional de pagos electrónicos más grandes a nivel del mundo y nuestra + +[04:00] Misión al fin es poder conectar las personas empresas gobiernos e instituciones a poder mover valor no a través de nuestra red electrónica. Llevo justo cuatro años en visas yo vine a visas a través de un emprendimiento y una inversión que había hecho en una empresa que se llama abra y. Ahora estamos creando en el 2015 2016 una billetera justo para enfocarnos en tratar de solucionar ciertas ciertos problemas y consumidores en mercados emergentes principalmente américa latina asia pacifico y antes de eso hace casi seis años en paypal en lo que era el equipo de mercados emergentes y luego responsables para expandir tape a las américas latinas en más de 20 mercados de productos estrategia alianzas todos en mi trayectoria. 
Ha sido principalmente en servicios financieros muy enfocados en mercados emergentes principalmente américa latina' + +[05:00] Trabajando también en las redes o profes de pagos mastercard y previo a esto inicié mi carrera marca de inversión el equipo de tiempo de bajar críticas pero muy enfocado igual en américa latina y algunos mercados en eeuu y europa y un placer estar con ustedes hoy creo que lo que está pasando en américa latina por lo menos en la trayectoria profesional mía nunca he visto tanto tanto entusiasmo tanta inversión y tanto talento no empezando a algunas empresas verdaderamente increíbles en nuestra región. Gracias por invitarme gases del toldo y bueno siguiendo jose con usted. Buenos días buenas tardes donde están. Gracias y es un placer estar aquí hoy me llamo josé luis núñez tengo 16 años en la arena de pitt está exportando para los internacionales de comercio + +[06:00] Electrónico hoy. Soy el vicepresidente de ventas para wire y nuestra empresa se enfoca en infraestructura financiera y afis que eso por poner desarrolladores y empresarios con proyectos de block change compra de equipo maneras pagos y movimientos de valor muchas. Gracias y es un placer estar aquí hoy maravillas. Gracias o sea de rubén con you. Hola a todos cómo están y. Gracias por tenerme aquí en este panel estoy emocionado de poder especificar va a hacer jamie de cómo pueden utilizar esta tecnología para resolver los problemas financieros de la gente en el mundo es un problema qué. Pues he tenido el honor de d. Pues tratar de resolver activamente. Pues + +[07:00] Ya por seis años trabajando bien con él siguió empezando desde cero desde que el beat cohen costaba 200 dólares para mí me ha quedado unos hasta ahorita. Ha sido una otra transformación personal y técnica gigantesca y no solamente inerte pero el industrial es un momento súper emocionante por qué [Música]. Pues. Ahora sí que nosotros los que proveemos servicios arriba de la tecnología que existe ahorita. Pues somos los primeros quizás llevándole acceso a ella a la gente que la necesita pero el cambio viene en masa ósea y ese es un efecto de red gigante que nos está empujando por nadie está empujando por todos. Entonces es un + +[08:00] Gran momento para empezar a quitarles el último momento para empezar a hacer algo porque después ya va a estar muy tarde en la fiesta no pero. Bueno mucho gusto de estar aquí y de platicarles de quien tiene una cuenta de dólares que la gente en américa latina puede usar para ahorrar hacer pagos internacionales y recibir pagos comprar cosas invertir pero. Bueno es práctico. Gracias y si aún tiene el óbito coin de 200 dólares le compro por 250. Entonces podemos hablar después de la en la presentación pero para hacerlo yo genere este verano los vendía y los vendía mil porque tienes que subir el precio un buen día hoy qué maravilla césar por último pero no menos importante por favor. 
Hello everyone, good day, and thank you for the invitation to this panel.

[09:00] Speaking of Tribal: our reason for being is to simplify business-to-business relationships, as I like to call them. In recent years we have focused, through our corporate card product, on making an impact on small and medium-sized businesses and startups, making it easier to handle the different kinds of payments they make. Part of what we pursue is to use technology to offer a platform built on several components we'll talk about throughout this session, artificial intelligence and of course blockchain, and in the end what we bring is a value proposition that seeks to maximize time, especially in this era of

[10:00] hyperconnectivity, in which everything has to be very fast, a matter of urgency and constant evolution. Briefly, about me and my jump into fintech: I have been working in technology for the last 18 years. I see the boom Arnoldo described in the region, and above all how each country is absorbing a piece of fintech on the consumer side and the business side. For me it was a game changer; I had to get on this train that is moving very, very fast, and on which innovation can drive a great deal of change toward

[11:00] simplifying operations and, above all, the lives of everyone here. Wonderful, well said César, thank you very much. Now I'll close the screen here and we'll move to our discussion page. One quick note: Eli from Techstars is with us as well, and closer to the end of the panel we have a surprise, a very important piece of information for everyone with us today about the Techstars Startup Weekend taking place next week. So please stay with us until the end, because it's important information.
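Before the questions begin: to ground the settlement speed and sub-cent fees Roberto mentions in his introduction, here is a minimal sketch of a single Stellar payment. It assumes the public JavaScript stellar-sdk and Horizon; the keys are placeholders and error handling is trimmed, so treat it as an illustration rather than production code:

```js
// Minimal sketch of the payment flow described above (placeholder keys).
const StellarSdk = require("stellar-sdk");

const server = new StellarSdk.Server("https://horizon.stellar.org");
const sender = StellarSdk.Keypair.fromSecret("S...SENDER_SECRET_SEED");

async function sendPayment() {
  // Load the sender's account to get its current sequence number.
  const account = await server.loadAccount(sender.publicKey());

  const tx = new StellarSdk.TransactionBuilder(account, {
    fee: StellarSdk.BASE_FEE, // 100 stroops = 0.00001 XLM, the "fraction of a cent" fee
    networkPassphrase: StellarSdk.Networks.PUBLIC,
  })
    .addOperation(
      StellarSdk.Operation.payment({
        destination: "G...RECIPIENT_PUBLIC_KEY",
        asset: StellarSdk.Asset.native(), // XLM here; an anchored asset works the same way
        amount: "25",
      })
    )
    .setTimeout(30)
    .build();

  tx.sign(sender);
  // Ledgers close roughly every five seconds, which is where the settlement speed comes from.
  const result = await server.submitTransaction(tx);
  console.log("Settled in transaction:", result.hash);
}

sendPayment().catch(console.error);
```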
Alright. From my side I have some questions here, but this is a very informal panel, and I'd like all the participants to feel free to jump in

[12:00] and add comments and questions too. To get us started, I have six questions grouped into: the fintech landscape in Latin America; the crypto route, that is, what each of you and your companies brought to it; and lastly, some advice and suggestions for those building fintech startups in Latin America. To begin, Rubén, maybe the first question is for you, about Latin America as a whole, from your experience not only with your company but since you arrived in the fintech world. What do you think are the main financial technologies growing in Latin America, and why do you think those technologies are growing in our region?

[13:00] Well, I think that in Latin America, as everywhere in the world, over the last 20 years we have been getting used to products that are more and more focused on us as consumers; so much so that your Instagram feed is very different from mine, and your feed would not be relevant to me. That concept has permeated the whole world and every industry that exists. As consumers we now expect products that fit us exactly. That was born with the internet, and the

[14:00] entrepreneurs who grew up with the internet, for whom it was very easy to ship a new product or a newer, more refined version to their users, are basically in the business of creating the exact product for the exact person. And that reached finance. There is a great deal of technology that lets many companies offer financial products on the internet, and thanks to this entrepreneurial dynamic, where you are always iterating to give the customer exactly the best product, we now have many alternative financial products hyper-targeted at specific consumers. That is going to drive an abandonment of the financial institutions that for

[15:00] a very long time offered a very generic product. I'm not saying they are bad, but thanks to this advent, this crossing of [Music] premises, we are going to see financial products and services focused on niches that would otherwise be served by a provider that didn't speak exactly to you: the millennial impact startup that needs a cooler card than the one the SOFOM in Mexico gives it, or the Latin American freelancer into gaming who gets paid through PayPal and needs an account better suited to them. That's why in Mexico there were so many

[16:00] applications under the fintech law: there are very many niche markets to which you can offer a well-aimed product and build a great business by doing so. It's no longer one bank with a million customers;
it's lending to a million people around the world, you know. We are surely going to see this keep extending across the whole region: there will be more unbundling of financial services, with more offerings that are personalized at the local level, or rather that manage to create an offer much better aimed at particular niches. And I think we'll see that not only across the region but across the entire suite of financial services that exist for businesses and individuals.

[17:00] Perfect. I think this is a very important point for our conversation today, because unbundling is allowing new technologies to be applied to specific use cases in finance. Before, the solutions were bundled by the banks, and the experience for remittances, or for what Tribal is doing with cards for SMEs, was something very mass-market. So on top of unbundling there is now also the opportunity to innovate in each of the verticals. And on this point, Arnoldo, you have a privileged view, because you see the corporate side but also the Visa Ventures side. In this emergence of new financial technologies, with unbundling and the hyper-focus Rubén described,

[18:00] have you seen investments and funds in Latin America following a specific area or trend, and does Visa have a projection of where those funds will migrate in the near future? Unquestionably. I think there are certain macro factors in Latin America working in our favor, and there are regions, like Southeast Asia, with certain similarities. We have a market of more than 650 million people, the majority of them young, so to speak, with access to a smartphone that becomes more affordable every day in price and in data plans, and at the same time we have financial services provided by the traditional players,

[19:00] which is an extremely profitable business, but at the same time a business in which three, four, or five institutions in a country control 70 to 80 percent of banking. So at the macro level, when you step back and look at these dynamics, you say: there is an opportunity here to offer the most basic financial services, simply with a much better experience than what we have been seeing. On the investor side, both Visa Ventures and many of the investors we co-invest with have an immense appetite, because even though we read in the headlines that this or that neobank raised yet another round, all those investment rounds

[20:00] are excellent but still represent, in my opinion, not even 10% of the Latin American opportunity. Given its size, this is a market that could support three, four, five Nubanks. And what we have seen, mainly over the last 24 months: first, there is more corporate venture activity, with strategic investors entering more aggressively alongside companies like Visa, and we are also seeing Latin American companies that are not in financial services, some of them very large beverage bottlers, making investments in digital wallets. We saw a case in Mexico where the first large investor was
OXXO, through its parent group. So we are seeing more activity on the corporate venture side and from the traditional funds. And the interesting thing we are seeing now

[21:00] is a group of funds traditionally known as crossover funds, funds that normally only enter growth rounds, Series B and beyond, now sometimes coming into seed rounds in order to secure a position in these companies early. So we are seeing liquidity like I have never seen in my career, and I say that to many friends. As for the verticals attracting the most interest: one is infrastructure. Today it is not easy to enable a

[22:00] card program in, say, Mexico, replicate it easily in Brazil, and then launch in Colombia. That infrastructure largely doesn't exist yet; demand is growing, and some companies have already raised rounds while others are in the process. Then there is everything crypto; especially after what the market has done in the last couple of weeks, I think there is a very large opportunity there, particularly with stablecoins, all the solutions around USDC and other very attractive options. And then there are basic financial services: being able to pay online, to buy on the internet even if you don't have a credit card, to get a consumer loan; services the traditional players do offer today,

[23:00] but whose experiences are perhaps not the best. So we are very active in the sector: today we have a portfolio of five companies, we are in due diligence with several more, and we are investing in banking-as-a-service, in acquiring, that is, growing merchant acceptance in the region, and in processors and issuers, much of it related to infrastructure. So I think we are going to see much more traction from large investors and funds, and at the same time incredible talent coming out of companies like Rappi, Uber Latin America, Stripe, and Nubank. The dynamics are there to build, without a doubt.

[24:00] Wonderful. For someone like me who hasn't lived in Latin America for some years now, it's great to hear this from someone closer to it; it makes me happy that we have this liquidity and this opportunity. And I think the categories you mentioned will come up again in our conversation today. For example, on infrastructure, from the business development we do at Stellar: we talk a lot with crypto infrastructure companies, the ones that today provide custody or rails, where standing up a wallet or a fiat on/off-ramp in the US or other countries can approach a million dollars of investment, a considerable amount for Latin America as well. So this is very close to our perception too. Thank you very much. Staying with investments: César, Tribal closed a round very recently, and you also did it in an

[25:00] innovative way.
So, can you talk a bit about your company's experience as a startup fundraising in Latin America, and whether that investment came from Latin America or the funds came mostly from other regions? With pleasure. At the end of the day there is an interesting figure worth discussing: the population of Latin America, 650 million people, which can translate into 650 million possibilities. Tribal has been investing in its own platform, and for our latest investment round we worked with several players in the sector, a combination of investors from North America and also from the region, precisely because the

[26:00] possibilities here are so broad. We are talking about a place where founders need this push to get businesses off the ground, especially at this moment when, as has been mentioned, it is the time to be a founder. So it's about giving these founders the possibilities and the tools for financial inclusion. The point is: company A and company B are making these connections, so what means do we put at their disposal to achieve that goal? That is essentially the spirit I bring to this, and we have been working hard with these investors on a mission that seeks to empower businesses and, more

[27:00] importantly, to ease their life cycle, above all in tasks that are part of daily routine, like simplifying the accounts-payable cycle and making transfers in a matter of minutes or, if we go by the technology's precision, seconds; having the competitive advantage that helps preserve speed. We often say technology can cause disruption and change, and applied to fintech in particular we see this new era of connected services, because we still have a legacy environment alongside a new one, and a new approach is emerging that will give us a means to accelerate. Perfect. And César, you mentioned that

[28:00] part of the funds came from North America and part from the Latin American region, and I think an interesting intersection between the topic of our panel, blockchain and finance, and the discussion of funding rounds in Latin America is that you also innovated in how you ran the round, using new technologies. Can you talk a bit about that too? Sure; there is an interesting fintech component there. In this era where everything is a service, we are leaning heavily into banking-as-a-service and enabling platforms that let us interconnect and, above all, accelerate the last mile. This greatly simplifies

[29:00] operations, and we have also brought in ways to accelerate payments.
That has been a strong focus for us, all of it empowered by letting our clients apply for a credit line in a matter of minutes. This is where we have covered a lot of ground, because we lean on artificial intelligence to build a model that includes growth vectors. It's about challenging the status quo a bit and asking what we can do to be a strong partner for these companies and, above

[30:00] all, to help them catalyze simplicity in their operations. On top of this there is expense management, and also integrations with accounting platforms, which again, in the as-a-service era, means having this information in near real time, seeing results in seconds, and staying aligned on the intelligence about the money coming in and the money going out; in the end, being able to analyze the trends, where am I spending more, what are my needs, and from there working with other departments that add great value in the company, but above

[31:00] all being able to decide based on data, because in this new era data is a very strong lever for all of us, and information helps us make better decisions. Perfect. I asked that question because I know Tribal's products a bit. You started with products that, seen from the outside, look like normal banking products: a loan, a credit line, a credit card. But looked at more closely, you are using blockchain, and you are using it to enable cheaper and faster remittances and better analytics to improve credit. So I think it connects with what Rubén and Arnoldo said about hyper-focus and improving the user experience with new technologies; sometimes you take products that already exist, but that existed in

[32:00] a format that didn't serve some segments of the population, such as fintechs and startups. So I think the message for those with us is the possibility of reinventing existing products with other technologies that also already exist, like blockchain. And the last question for this section I'll leave open to whoever wants to take it. Something that struck me strongly is that we have room for four or five Nubanks, or four or five Mercado Pagos, or whatever it may be, and I wonder a bit why we aren't there yet. Latin America is at the vanguard of the fintech revolution, but maybe it still falls short. What is missing for us to reach our full potential? Does anyone have an opinion or a point they

[33:00] would like to bring up?
Well, I don't know what more evidence there could be that a fintech revolution is coming, or is already here: there is a bestial amount of money flowing into the space, and there are fifteen alternative financial products competing for your attention on Instagram all day long. But I think it is going to be a somewhat painful change, because right now the market is fairly closed. I don't think we can expect anything different

[34:00] from a reaction similar to the one taxi drivers had when Uber arrived, and I think [Music] the incumbents in this case are much more powerful than the taxi drivers and their unions, who even so managed to force certain changes and block Uber in several cities. The banks, which have enormous economic interests, are going to protect their moat, as I would, because if I were an executive with a company that has a moat, why wouldn't I defend what's mine? And look, I'm very much a capitalist; for me everything is fair game, as long as it's ethical and moral, but if the moat is a license, and someone is taking advantage of that moat, I'll go after the license, because it is just another moat, and I know

[35:00] they won't let me do certain things, because behind those moats are the banks. And if it turns into a case like Venezuela, or cases like that, well, someone has to serve those places too; everything is fair game. I think we have to work together, and that includes our friends at Visa and Stellar, because in a way you hold a key: you can give a person in any country a piece of plastic with which they can spend their money. That is a super powerful channel, and Visa has to decide whether it wants to be on the team of innovation, inclusion, and progress, or whether it wants to help the banks guard their moat. And I think it's a real question, because

[36:00] there is a lot of infrastructure that doesn't depend on the banks; it's there for the banks, but it can also be used by fintechs, so that fintechs can offer financial services. So the gatekeepers can either close ranks, or join the fight for financial freedom. Visa is a clear example, because you sit right in the middle. For example, Cacao Paycard was not allowed to keep operating in Mexico, so now fintechs will have to go direct with Visa, or with a sponsor bank, to launch their programs. So my question for Visa is: which side are you going to take? And depending on what you say, I'll be knocking on your door on Monday. Arnoldo, by

[37:00] clause 27, article 2 of our rules, the company mentioned on the panel has priority to respond.
No problem at all. The truth is that Visa sits in a very interesting position: we sit literally in the middle of several ecosystems. If you know the history, Visa was born from a consortium of banks in the US, but I'd say that although today the majority of the business of the payment networks, not just Visa but the other networks too, comes from a business that is relatively simple (the technology is very complex, but the model is simple), it works like this: you have members licensed onto these networks, these members today are mostly banks, and they issue or distribute payment credentials, Visa's or any other brand's, and our role is basically to ensure that between those parties,

[38:00] the banks, the acquirers, the merchants, the consumers, that flow of money happens. At the end of the day that is Visa's technology. What we have done in recent years is issue principal licenses to fintechs, so that a fintech can be a member of Visa in its own right. Today there are fintechs in Latin America that are already principal members, the same as a bank, and they can transact and consume Visa's products directly. But historically, yes, Visa and the other payment networks have been grouped with the banks, because that is where the business model comes from. I think

[39:00] there is also a regulatory dimension that we have to deal with, just like the fintechs. I won't get into specific cases, but matters like Cacao are a decision of the CNBV or the other regulators, not Visa's decision; in fact we have a strategic alliance with Cacao and many others, because they help us enable fintechs, and in many cases they help those fintechs issue, say, a credit, debit, or prepaid card in a fairly agile way. So I think our position is, and will continue to be, about how we contribute to the growth of the ecosystem. At the end of the day our philosophy is that the consumer will choose the best product, be it a card from Tribal or a card from a Mexican bank or a bank in any other country. What we have

[40:00] been doing, through a program we call Fintech Fast Track and through Visa Ventures, is helping the ecosystem grow. When money moves across Visa's rails, those payments may have been generated by fintechs, by crypto players, or by traditional banks. But yes, I think there is a lot of work to do in the region, because some markets, some countries, say they are promoting innovation, and at the same time you look and say:
well, how can you say you are promoting innovation when the regulations don't let a fintech do certain things? Part of our role is precisely to educate regulators, from what cryptocurrencies and

[41:00] blockchain are, to how to value a bitcoin, because these are relatively new models within the ecosystem. And today that is an effort not only by Visa; I believe the rest of the payment networks are also trying to speed things up. On Rubén's point: it's not that we are on the side of the banks or on the side of the fintechs; I think we are in the middle, and the business model feeds itself and grows as more companies, fintechs or banks, run Visa products or other networks' products. In general, if the ecosystem grows, it is not necessarily about taking one side. And Arnoldo, don't feel I'm attacking you, but clearly Visa is [Applause] pushing the technology: you are on this blockchain panel talking with the people on

[42:00] this side, with Tribal, with Wyre; clearly you are pushing for this. But my question is, and I don't know in much detail the integration Visa and Stellar are building: could it happen that a client spends the money in their wallet directly through Visa, with Visa receiving, for example, tokens in exchange for the money being spent, or will there always have to be a bank in the middle between Visa and the person? Look, on the more crypto-specific side, we have enabled

[43:00] settlement, the final leg of the transaction, in USDC: we added USDC as one of the roughly 150 currencies within the Visa ecosystem, so crypto players can already settle directly on VisaNet with USDC. We have launched crypto APIs, through an investment we made in a US crypto custodian, to offer any player in Latin America that wants to provide crypto buying, selling, and custody services a way to do so with Visa licensees. And there are things we are developing around energy, helping people who generate energy commercialize it. So I think that at the end of the day it's not about making sure there is always an intermediary; it's about how we can move value, as long as it is secure and reliable,

[44:00] because there is one thing we know: the most intimate thing for a person, or for a business, is their money. I think there is still a great deal to do. When I was at Abra, the philosophy was precisely building so that there would be no intermediaries, person to person. So I think we will keep finding use cases where there is no reason to have two or three intermediaries; that is not what this is about. But to be honest, you ask good questions, because many of these matters are relatively new for the payment networks. I think this discussion connects very well to the question, because Visa is in the middle. I look at it the way Rubén framed it, whether it is ethical or fair play in the

[45:00] capitalist game: Visa has an interest in keeping the big banks using Visa, but it has the possibility of bringing startup technology to the incumbents, and also bringing cards and access to credit to the fintechs.
What we at Stellar see is that USDC settlement is an initial opportunity, and other stablecoins in other currencies, like the yen, the British pound, or the euro, are an option for deepening this in other geographies where, for crypto-first players, this is much more natural than going through a bank and bearing more costs. Roberto, sorry, just one question on what you're saying, given that the panel is about Latin America: is there today a product, or is Visa interested in creating one over time, to give access to those accounts that

[46:00] are connected to Visa to people across the whole Latin American region, giving them plastic they can use to spend in any country? If the question is for me: yes, of course. We are even seeing a use case in Latin America today with marketplaces: wallets in certain markets will be able to receive those payouts directly, straight into the wallet, in USDC say, and the user will have a payment credential, digital or physical if the company wants, so the payout lands in the wallet and is pushed onto a Visa credential, digital or physical. So there are use

[47:00] cases already being worked on in this direction. The other opportunity is cross-border remittances: when you think of USDC you can think of B2B payments and cross-border flows, but also domestic flows within Latin America, where there are countless cases. Rather, I'd say: I would love to talk about which use case you'd like to build, because my team is leading these crypto initiatives, and the more use cases we stand up, the better for us. We are Visa clients and very happy, because we have a prepaid card people can use to spend their money over time, though obviously there are many improvements that could be made to the product, and there's the networking, Roberto. But it's incredible that you are here representing a company with so much power in this

[48:00] battle, and it is a battle, because I believe we are all working so that people get better financial services. I agree, and I have a somewhat interesting role, because I'm inside Visa pushing all these initiatives, but at the same time I sit on the board of several fintechs and I come from that world, so I have a foot on each side. It's interesting, because the dynamics can sometimes give you an edge, but the point is to open the ecosystem as much as possible. Folks, we're almost at the end of our time, and we have a question in the chat. Roger Rengifo asks: could it be made possible, through a Visa card, for a person to buy a token listed on the Stellar

[49:00] network? Roger, that is already possible today: there are wallets connected to the Visa and Mastercard rails through third parties such as Simplex or Wyre, among others (a minimal sketch of the trustline such a wallet needs on Stellar follows below).
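As context for that answer: before a Stellar account can hold a USDC-style stablecoin or token bought with a card, it needs a trustline to the issuer's asset. Here is a minimal sketch, again assuming the JavaScript stellar-sdk; the issuer address is a placeholder, since in practice you would use the issuing account the asset's issuer publishes:

```js
// Sketch: open a trustline so a wallet account can receive a USDC-style asset.
const StellarSdk = require("stellar-sdk");

const server = new StellarSdk.Server("https://horizon.stellar.org");
const wallet = StellarSdk.Keypair.fromSecret("S...WALLET_SECRET_SEED");

// Placeholder issuer; real issuers publish their issuing address (e.g. in a stellar.toml).
const usdc = new StellarSdk.Asset("USDC", "G...ISSUER_PUBLIC_KEY");

async function openTrustline() {
  const account = await server.loadAccount(wallet.publicKey());

  const tx = new StellarSdk.TransactionBuilder(account, {
    fee: StellarSdk.BASE_FEE,
    networkPassphrase: StellarSdk.Networks.PUBLIC,
  })
    .addOperation(StellarSdk.Operation.changeTrust({ asset: usdc }))
    .setTimeout(30)
    .build();

  tx.sign(wallet);
  await server.submitTransaction(tx);
  // The account can now hold USDC, e.g. funded by a card purchase through an on-ramp.
}

openTrustline().catch(console.error);
```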
So there you have wallets where you can buy stablecoins, and tokens too, using a credit card. And what Arnoldo mentioned here to Rubén is that one of the things being worked on with Visa, the Fast Track for issuing a Visa card, is not only for wallets but also for fintechs: not just so that people can buy and hold crypto assets using the card, but also so that the fintech issuing the card settles with Visa using USDC or another digital currency, because if you already have a crypto ecosystem there is no reason to go through banks and

[50:00] fees and settlements that take a day or two. So the answer, I believe, is that we already have something like that. Well, before I hand over to Eli to talk a bit about Startup Weekend, I'll put one final question to the panel; the truth is, time flies when the subject is this interesting. We have entrepreneurs and people in the audience who are starting their projects. Do you have any advice or final comment for those who are starting their startups in the region? Many positive things were said today: about how the unbundling now exists, as Rubén discussed, and so on. José, we haven't heard as much from you; maybe you can start with a thought, and afterwards I'll open it up and then put the slide up for Eli.

[51:00] Absolutely. For me, as Arnoldo also said: Wyre bet that the winning play was to support the community of developers and entrepreneurs with the infrastructure, the technological tools, and the licenses to build the companies of the financial future, with blockchain projects, crypto purchases, wallets, and stablecoins. Stablecoins were mentioned, and that is exactly the work we do, enabling those stablecoin purchases with a Visa card, payments and, finally, the movement of value. A very important part of building our network is working closely with Visa, with Stellar, with the Latin American ecosystem, and with the rest of the world. So for us, the bet we made is exactly that: to support those who are starting to build the companies of the financial future. I like that advice, and it comes with

[52:00] a bonus: we are also clients of José's company, and of Stellar, and of Visa. I think the best advice I could give to someone in fintech who wants to build in this industry is to take a financial service you don't like the way it's being offered, whatever it is, in any money flow, and then ask: how can I do this better, and how much better can I do it? And if the answer is, well, I can do it enough better to earn a 30% margin on what I'm selling, then start a company, and then go

[53:00] figure out what technology exists out there that you can use to solve those problems, and build a product tailored to the problem, wherever there is an offering that isn't personalized, the kind that makes you say: this is like still being forced to use Windows XP, stuck on old versions. Perfect. Anyone else? Arnoldo, anything to add on the entrepreneurship question?
I think that when Rubén talks about experiences that aren't good, it resonates, at least for someone like me who emigrated from Latin America to Canada when young: back then the process of sending money was terrible. TransferWise was the first service I used apart from the banks, because they made my experience much

[54:00] better, and today there are ten TransferWise-like options to choose from, many of them from Latin America, just like what was said about the great work being done in Mexico. So I think that connects with the question. Alright everyone, I'll put up the slide so Eli can talk a bit about the Startup Weekend, and I want to close my part by thanking you all very much for your time, for the sincerity and uniqueness of your answers and ideas, and for the inspiration, so that the entrepreneurs here in Latin America make the region grow even faster and we regain a position of leadership. Many thanks to all of you, and I hope we talk again soon. Eli, over to you. Thank you all very much.

[55:00] Thank you so much, Roberto. I'm Eli Becerril, regional manager for Latin America of the community programs at Techstars, and I want to give you one piece of advice in addition to the panelists': build, and build now. Take the leap and join the next Techstars Startup Weekend Blockchain LatAm, with Visa and with Stellar. We are putting this great event at your disposal so that, around all the ideas discussed in this panel and the many other problems that need solving in finance in the region, we can come together with people from all over Latin America who are also interested in building, who are also interested in blockchain, and who also want to give finance a different face and

[56:00] work over one weekend to take an idea one step further, to a prototype, to a validated model. We will have a splendid group of mentors and judges throughout the weekend who will surely help you connect and learn a great deal. Next slide, please, Roberto. It is a fully immersive, 100% online experience. On day one you pitch your idea to the whole group of participants, and multidisciplinary teams are formed with people from different parts of Latin America; to give you an idea, we already have people registered from Mexico, Argentina, Costa Rica, the Dominican Republic, Ecuador, and even some Spanish-speaking friends in Spain. We really are connecting the Spanish-speaking and Latin American communities to get hands-on

[57:00] and transform finance in Latin America with blockchain. On day two, Saturday, we bring in a phenomenal group of industry experts, people who have worked at different startups and financial organizations in Latin America and who lead blockchain communities in the region; they will be the mentors while you spend the day building a prototype and creating a business model that makes sense, just as the panelists described. And finally, on day three, Sunday, with everything you built over the weekend, you will present in a pitch format before a judging panel. And in addition, thanks to Visa and
thanks to the Foundation, we will have

[58:00] special prizes. And as if that weren't enough, all the money raised from ticket sales will go to Laboratoria, a nonprofit organization that works to bring greater diversity and opportunity to women in Latin America so they can keep growing by learning to code. So it's an event with very many positive things, and it's a great moment, as was said, to build in Latin America and continue this push to transform finance in the region. The event is next week, next weekend, October 15 to 17, 100% online for everyone in Latin America. The page is swblockchainlatam dot co, and for being at this event with us you have a special code for 50 percent

[59:00] off: SWL50. We hope you share the event with the people in your community and join us; we'll see you next weekend, starting October 15, and thank you very much. And thank you, Eli, for running through it quickly; it's a source of pride to collaborate on the initiative, and I encourage everyone here to take part in the event. So, many thanks again to the panelists and to everyone who was with us. I'll stay a few minutes to answer some questions in the chat, but that's it for today. Thank you all, and have a good rest of the week.
diff --git a/i18n/es/docusaurus-plugin-content-docs/current/meetings/2022-05-06.mdx b/i18n/es/docusaurus-plugin-content-docs/current/meetings/2022-05-06.mdx
new file mode 100644
index 0000000000..d6734c7f30
--- /dev/null
+++ b/i18n/es/docusaurus-plugin-content-docs/current/meetings/2022-05-06.mdx
@@ -0,0 +1,139 @@
+---
+title: "Building the Future of the Financial System with Blockchain"
+description: "Panel on fintech and blockchain in Latin America, focused on fiat-crypto ramps, payments, remittances, and ecosystem collaboration."
+authors: [mercedes-vila]
+tags: [community]
+---
+
+import YouTube from "@site/src/components/YouTube";
+
+
+
+In this panel, fintech and blockchain leaders in Latin America share their journeys and explain how they use the technology to broaden access to financial services. They discuss fiat-crypto on/off-ramps, payments and remittances, transaction costs, and the role of networks like Stellar in enabling faster and more accessible operations.
+
+The conversation also covers collaboration among companies in the ecosystem, education for new users, and practical advice for entrepreneurs looking to build focused products with real value. The closing invites the audience to join community initiatives and events to accelerate adoption in the region.
+ Video Transcript
+
+[00:00] Alright. Hello everyone and welcome to this phenomenal panel called "Blockchain-Powered Businesses in Latin America." My name is Mercedes and I lead partnerships at Vibrant, the virtual wallet built on Stellar. It's an honor for me to be here today with these blockchain leaders from Latin America. I want to do a brief round of introductions so you can get to know them. I see we are all here, so let's start with Rubén. Hi Rubén, welcome; tell us a bit about yourself and about AirTM. Hi everyone, how are you, can you hear me well? Perfect. I'm Rubén Galindo, CEO of AirTM. I've been at this job for seven years. It has been a great adventure that we started

[01:00] with the mission of giving financial freedom to the people of Latin America, because we believe it is a human right to have access to financial services that let you do whatever you want to do. (Am I coming through? My computer is telling me to change my audio selection, but OK.) It seems just to us that people should have access to financial freedom, and out of that feeling of injustice we created this platform so that everyone can have access to money that is connected with the rest of the world, no matter where they are and no matter the conditions of their countries. And I love being here in a blockchain conversation, because I believe

[02:00] this financial freedom movement, this fight, is a decentralized fight being waged on different fronts by different companies that make up different parts of the ecosystem, and I would love for the result of this call to be more entrepreneurs willing to join the battle, taking on a role in whichever vertical, or whichever step of the supply chain, to give Latin Americans, and people everywhere, financial freedom. Thank you, Rubén; the mission is impressive, and we want to hear more. Now I'd like to introduce Jack. Thank you for the invitation. Hello everyone, I'm Jack Saracco, cofounder and head of business development and partnerships at Settle. We are the largest Latin American fiat ramp:

[03:00] we acquire users by letting them buy crypto with local fiat in Argentina, Brazil, and Mexico. And very much in line with Rubén, we started in 2018 with the goal of helping the masses, everyone, transition from fiat to crypto in the easiest, fastest, most scalable way possible. I'd say we sit more at the infrastructure layer, and we aim to help decentralization through institutions and centralized applications that want to start onboarding users and giving them financial freedom, but always thinking about how to give that power back to the Latin American user, most of whom suffer inflation and excess monetary power over them; how to give this power back to the people and

[04:00] to the user, instead of it staying with a few. So that's what we do at Settle, and I share exactly what Rubén said: if from this we get more entrepreneurs who want to create more applications, with more motivation to build a bigger crypto industry, we all win. Thank you for the intro; we also want to hear more about you and Settle Network later. Now, hi Milagros, tell us about yourself and about Vibrant. Hi Mercedes, hello everyone, and thanks for the invitation. My name is Milagros and I lead content marketing for Vibrant. Vibrant is a digital wallet that uses blockchain technology and lets you buy, store, send, and receive USDC. Our wallet is built on the Stellar blockchain, and this makes it possible for users to send and receive transactions without limits and for free. We are also part of the Stellar

[05:00]
Gracias por la intro también queremos escuchar más de voz y de ser un network. Ahora. Hola milagros contentan contarnos acerca de voz y ya cerca de bay brand l mercedes sole a todos y. Gracias por la invitación. Mi nombre es milagros chorro ti. Soy líder de marketing de contenidos para buy brand by frank t es una billetera digital que utiliza la tecnología breaking y que permite comprar almacenar enviar y recibir yo así si nuestra billetera está desarrollada en exterior block change y esto hace posible que los usuarios puedan enviar y recibir transacciones sin límites y de manera gratuita además somos parte de Stellar + +[05:00] Development foundation la organización sin fines de lucro que supervisa la red styler y bueno para contar un poco mi historia personal y como llegué acá me formé en relaciones internacionales trabajo en marketing hace algunos años hace un poco más de un año sería el mundo sin tequia esta industria y bueno súper contenta de estar acá y de compartir este evento con este increíble panel de speakers que tenemos hoy mil. Gracias milagros. Quiero aclarar para los que están viendo que milagros y yo trabajamos juntas y bueno impresionarte tenerte acá en este panel y. Ahora matías ansiosos por escuchar acerca de voz y de cronos page está el mercedes un gusto y un gusto a todos también por estar acá y. Gracias yo soy cofundador y. Soy sitio de cronos pero es una empresa una startup que básicamente nos enfocamos a esta lucha + +[06:00] Como bien decían los chicos desde la diferencia del grande contra los pequeños desde sudamérica y con toda la lucha de las grandes de los grandes costos de transferencias de cobros y nosotros aplicamos básicamente toda la tecnología blogs en particularmente al estelar al tratar de optimizar todos esos carriles y tratar de llevar la optimización de costos y la rapidez de transacciones no solamente las personas sino también a las empresas a los emprendedores a los freelance y dejamos un poco muy importante no solamente en lo que son pagos online sino también en pagos presenciales que después si quieres te explico un poco en detalle si queremos escuchar de todos impresionante y super innovador y bueno con este panel de 10 queremos difundir las posibilidades de la tecnología block change y seguir impulsando el ecosistema democratizando el acceso a tanto de usuarios como desarrolladores y emprendedores creemos que este panel sea interactivo así que si tienen preguntas por favor hagan las a través del chat es + +[07:00] Un y al final de la charla vamos a intentar contestar algunas. Bueno ahí empezamos. 
I want to start with a fairly simple question: how did you discover blockchain, and what is your history in the ecosystem? Rubén, would you like to start again? Sure, with pleasure. I got into blockchain in 2015, maybe 2014: someone invited me to a breakfast in November to talk to us about bitcoin. I was at the house of my best friend, who, by the way, gets married tomorrow, and I still have to write a speech for his wedding. At that table I had an offer to go be an intern at one of the first crypto fintechs that existed, and I didn't know whether I wanted to go; I just

[08:00] wanted to be in San Francisco. I arrived on the 1st, or I think the 8th, of January 2015 at that startup, and our task as interns was to sell it: to figure out who we could sell the promise of blockchain to. They had their own blockchain research arm, a very unique abstraction of blockchain for their own tokens, and before everyone else they were doing sidechains of bitcoin. We tried to sell it to mobile phone companies, to banks, to remittance companies, and nobody wanted it. Then one of our cofounders said: look, here we have technology that would help us give anyone access to free money; who needs free money? Think about it. So we set out to understand why people were using bitcoin in Argentina and Venezuela, and obviously the reason is fairly

[09:00] simple: people see value in this technology because they have a scarcity of financial freedom. So, working at that startup, wanting to be an entrepreneur, in San Francisco: that's how the idea came about of fighting for those who lack financial freedom, building on both. I loved the story, and good luck writing your speech for the wedding! If you want, I'll tell a bit of the story of how I started, also around 2016, 2017, discovering crypto for the first time, watching how it moved and how people interacted with it. I think that, as Argentines, the great majority of us came in through

[10:00] inflation of the peso: the potential of holding a reserve that nobody can touch and of truly being the owner of it. In 2018 the question was how to create something with a lot of future value, and our analysis was: we see that today 99.999% of the world's money sits on the institutional side, on the fiat side; how do we create something that directly helps people get into crypto? I personally had a rather hard time buying crypto: I had to buy peer-to-peer from people, picking up a little bitcoin every so often, in cash transactions, for fear that someone would take advantage of you. So we thought about how to solve that problem, and that's how we decided to build a fiat ramp at that moment. Later, building on the fiat ramp, which

[11:00] is a product you can plug other products into, came a remittance product, moving money internationally from one place to another; many clients were asking us for it. At that moment, at the beginning of 2019, the question was: which is the cheapest blockchain for sending funds from one side to the other? We looked at absolutely everything in the top 10, top 20 of the moment, and five were left that are still standing today, Stellar among them, and we said:
OK, let's start running operations on top of Stellar, because it is scalable and fast. From the mindset of an Argentine, with Argentine costs, it made no sense to pay for an Ethereum transaction that cost between 4 and 8 dollars in gas; we said this doesn't scale, for an Argentine that's already very expensive. So which is cheaper, which rails are cheapest?

[12:00] Stellar was also a known name for us, and that's how we became the fiat on- and off-ramp for everything Stellar and created the stablecoins for the Argentine peso and the Brazilian real. Great story, and a lot of vision too, five years back. Well, my story is very different and very recent compared to Jack's and Rubén's: I entered the blockchain world during the pandemic. Before starting in blockchain I worked for other startups that had nothing to do with blockchain or crypto, and while it seemed a super interesting industry, at that moment I was close to finishing my studies and wanted to explore other industries while growing a bit more in marketing. That's how I arrived at Vibrant: I came for the job, and I ended up falling in love with blockchain and crypto. At the beginning, having no experience

[13:00] in this, I had to immerse myself and learn all the new concepts and ideas from scratch. I knew a little about blockchain and crypto from my previous job, but my knowledge was super limited compared with what I know now. Also, as Jack mentioned, I was lucky to work with a low-cost option like Stellar, and I think that played a super favorable role when it came to experimenting a bit with crypto personally. And Vibrant, some months before I started, saw the opportunity to give Argentines an alternative means of protecting themselves from devaluation. What I like most about the app is that it is designed to broaden access for people like me; it has a great mission, to democratize crypto,

[14:00] making it super easy and allowing all Argentines to access a secure wallet right from their phone. So that's a bit of the story of how I discovered blockchain. Thank you, Milagros, and I hope all the recent graduates watching are inspired and encouraged to join the ecosystem. In my case it also started, like Jack, from being Argentine and looking for alternatives to preserve the value of money, around 2015 more or less, which got me deeply interested. After that I was very involved in development, trying to provide solutions for certifying the origin of documents, and I got deep into blockchain to understand how we can give value to a document that is shared digitally and verify whether or not it is original. That was

[15:00]
Entonces esa fue + +[15:00] La primera empresa que dice trabajar en mucho tiempo una multinacional y fue un éxito que tuve en donde lo que hicimos fue certificar documentos para educación este proyecto lo venimos a eeuu y ahí lo que hacíamos básicamente es el documento que salía de la universidad tener un qr y con ese qr de certificar que realmente venga de la entidad emisora y ver que la persona se estaba presentando un trabajo por ejemplo vida de ese trabajo así que arranque fuerte con la parte de blocks y más que en la parte de certificación y después de esto entendiendo el panorama de sudamérica y viendo que había una tecnología de procesamiento de pago que hacía 20 años que no se modificaba estamos 100% bocados. Ahora en tratar de actualizar esos procesamientos de pagos con block change y básicamente con la solución de stella así así estamos. Bueno. Gracias matías me encanta ver cómo las distintas rutas que tomaron y bueno el caso de uso que recién contaste en términos de cómo entras te que demuestra + +[16:00] Las distintas como las distintas capacidades de lo que puedes hacer en blogs y no así que. Bueno buenísimo. Bueno a otra pregunta me encantaría saber más acerca de cómo piti sanz blog change en sus empresas y cuáles creen que están los principales beneficios de blogs en para sus clientes a ver empezamos con ya que esta vez sale. Perfecto. Bueno nosotros somos nativos en blogs chains entienden que nosotros creamos la empresa para poder darle la solución empresa escrito y empresas blogs change en particular uno de los servicios que vemos nosotros como se les comenté aceptamos fiat en el país en argentina basic y méxico y damos escriptor y se puede hacer tanto compras 90 no sólo aplicaciones descentralizadas a billeteras a ecosistemas sino también a extreme luís díaz institucionales tenemos muchas empresas que hacen corporate permítanse de un lado o de otro a partir de eso puede que nos interesa mucho trabajar constelar a + +[17:00] Nosotros mismos que había un costo muy grande en las transacciones de remítanse tanto de de retail de gente que tiene modo del dinero de eeuu por ejemplo a latinoamérica o empresas también se quieren pagarle a su filial argentina o latinoamericana vimos que había un costo muy grande. Entonces la oportunidad y de poder usar un block change era increíble porque tener una doble opción de bajo costo y rápida de ejecución nos hacía muy escalable a la hora de la contraparte que vendría a ser el institucional banking services que se estuvo dando en los últimos 50 años. 
So of course, in 2018 and 2019 it was very hard for us to get the people at these companies to understand what blockchain was and how they could use these transactions in their favor. When they really started to see that they were saving a lot of money doing it, and doing the

[18:00] operations much faster, they began to see the genuine use case in their day-to-day and in their pocket, and they started repeating operations with more volume and referring people, saying: this works and it's real. And I think it's very important to highlight that in 2018 and 2019, and before that too, many people saw crypto as something illegal, something from the black market you shouldn't get into. Today, in 2022, the perspective has changed enormously: now it reads as entrepreneurship, innovation, and the future, with great agility and transparency, when only a few years ago it wasn't like that. I think we are going to see more and more companies moving down this path, and it's incredible that many more institutional players are starting to consider crypto as a medium for financial interaction in the future.

[19:00] Great, thank you for the details. I love hearing about companies that build the infrastructure and the ramps in Latin America and keep working on access to blockchain so that more end users join; that seems super important to me. Totally. I think we are like a bridge that carries people from fiat to crypto, and the goal of applications like Vibrant, or like AirTM, is then to keep the users and give them a service within crypto, while we are the bridge between one side and the other. I'd like to add something: I think Jack is completely right, and given that we are on a panel with more entrepreneurs, I think it's important that everyone sees that blockchain is simply

[20:00] technology; applied specifically to payments, it allows much more fluid and cheaper payments, this exchange of information and money, and it is also technology that democratizes access, letting an entrepreneur participate in a business that used to be super closed off and was available only to the banks and financial institutions that could put up enormous investment. In a way we are disintermediating those dinosaur players, and it's super open: anyone can participate. So to me it's not only a disruptive technology but also a great tool for

[21:00] building and generating value with little to no barrier to entry. I love it, totally agree. It's very good to communicate this to entrepreneurs, so you all know that it's possible and feel encouraged to join the ecosystem. And obviously, with everyone on this panel being leaders in the industry, it seems key to me that you join these talks and tell us a bit about your history, what you do today, and the problems you are solving, so that we evolve this ecosystem together. Thanks for talking a bit about this.
Well, in our case with Vibrant, as I mentioned before, Vibrant gave Argentines the opportunity and the access to a way to safeguard the value of their assets. As an app + +[22:00] built on Stellar, Vibrant was specifically designed to broaden access to stablecoins pegged to the price of the dollar, like USDC. As for the general benefits of this technology, I think the most important one we could highlight for us is decentralization; in my opinion it's the defining characteristic of the technology and its strong point, since to authenticate transactions or operations you don't need any kind of intermediary, which obviously reduces validation times. Then there's the fact that the network is distributed: when you have a distributed network, in the first instance nobody owns that network, and I think that means users always have a record of the + +[23:00] information. Also, as the others mentioned before, the technology has very, very low costs for users, which I think is blockchain's star advantage. In our case, in Vibrant's case, crypto transactions are also made person to person, without any participation by a central controlling body — that is, again, without any kind of intermediary — which reduces the network's maintenance costs and makes it significantly cheaper. And lastly, as the others also said, there's speed: crypto transactions are super fast to process, practically immediate I'd say, and I think that's another great solution blockchain brings — the ability to speed up the slow processes of bank + +[24:00] transactions. Great, thanks Milagros for telling us more about Vibrant. Now Matías, what about the case of Cronos Pay? Yes — in the case of Cronos, we were basically born 100% on blockchain. The decision came down to what we've been discussing — cost, speed, distribution — and we all chose Stellar for that, but we were born on blockchain always believing the technology is going to help, as it already is, to change the payment-processing system. Going into a bit of detail: when an account is created within Cronos Pay, a Stellar account is automatically created for it, and from there, depending on the country, a local bank account is created if required. Then we use + +[25:00] the other anchors in the ecosystem to do the on-ramp and off-ramp, as Jack said, of the stablecoins and the fiat connection in each country, and we also use crypto-to-crypto anchors to move out over other rails — Ethereum or ERC-20, say — depending on the alternatives required. And adding to what Rubén said about why all these changes matter, and why people from outside the traditional financial system should get in: I am
an electronics engineer; I have an MBA and a master's in finance, but finance wasn't my native education. It came more from lived experience, from the problems we have in emerging countries, and from studying how all of this was put together. You see that the banks + +[26:00] initially worked with these systems — there are only a few of them, they're proprietary, and they kept taking turns so that everything stayed inside their very closed financial ecosystem — until at one point a very select group formed a very elite club where they went to eat caviar, and that's where the first credit card was created: created in the club, for the dinners — hence the name — and they say it was used for exactly that kind of transaction. From there the banks started issuing their own credit cards, and the banks themselves own the card networks, so it's still the same closed ecosystem. And today, where we're going with Cronos Pay is this: blockchain technology gives us a chance to take all those high fees charged on transfers and settlements and, doing it a different way, let anyone with a small business in Peru, in Ecuador, in + +[27:00] Chaco, or in any province of any South American or emerging country, charge with their phone at minimal cost, with the money staying with people rather than with intermediaries. That's great — it's a new paradigm. And I love hearing that all the companies are blockchain-native, so to speak, that they started directly on blockchain; it strikes me as super innovative, because rather than trying to optimize a system, you're out to change it completely. Exactly. Excellent. Now, I see an interesting question in the chat. They ask: how do you see the developer market on Stellar in particular? There are other blockchains — as Jon says, Solana — that are concentrating talent; do you see that as a challenge for scaling or developing new ideas? + +[28:00] I think as entrepreneurs we can't be married to one particular technology, because what we want is to solve people's problems, and people have problems that may need different technologies. From our perspective, since we dedicate ourselves to giving people crypto dollar accounts, and there are so many different versions of crypto on different platforms and in different options, to fulfill our mission we have to be as versatile as possible and be compatible with any technology, because what matters isn't which technology you use — what matters is your ability to solve people's problems. What I like about Stellar compared with the others is that Stellar is very focused on building the decentralized rails + +[29:00] for money, on connecting the world's money.
So all the investment the Stellar team makes — and it really is a super team; obviously I'm biased saying that, because Stellar is an investor in Airtm — but I've worked with them for a long time and I can honestly vouch for the quality of the team. You can also see how much they invest in developing the ecosystem: not only bringing in participants like Airtm and others, but also building their own tools, like Vibrant, to understand from the entrepreneur's point of view how their technology should be used. They also do partnerships with gigantic companies like MoneyGram, partnerships with + +[30:00] exchanges, and they work closely with regulators. They're 100% focused on the financial rail, while the others take a more web3 approach: "let's build the decentralized world." Imagine how gigantic that task is — and a very big part of a decentralized world is money moving in and out of it — yet they're trying to build everything from scratch. As an entrepreneur, it gives me anxiety to think I'd have to do everything, because you can't do everything. So I really like Stellar's bet, because it's well focused, and I believe in focus. Totally agree — it's a very interesting question, and I share what I'd been thinking directly: + +[31:00] the way I learned it, each blockchain wants to solve one particular vertical of the web3 world. Enjin, for example, wants to tackle the gaming side; within crypto you also have Solana, which positions itself as cheaper DeFi than the competition. I think each one wants to take a role among web3's endless verticals. Look no further than Axie Infinity, which decided the chain it was on didn't work for its gaming and went off to build its own blockchain, Ronin. It's an example that no chain can be absolutely everything: one can be a great base for many things, but different verticals will go elsewhere to execute other things. I + +[32:00] think, as Rubén said, Stellar's main focus is finance and payments, and if they execute that well they can grow enormously; with the team they have, the odds of executing are higher because they're more focused than other blockchains. And we can show the use case: the transactions — how many you can make, through what means, how fast, and at what cost. They aim to make one very particular niche of web3 efficient, and that's where I
see the biggest disruption coming: in finance — decentralized, fast, and without exclusion, naturally. Excellent. We've touched on a lot of topics — interoperability, which always comes up in any web3 and blockchain discussion, and we're all working on how to make everything much more + +[33:00] interoperable. This is only just beginning; blockchain is, as I like to say, like a baby — we're in its first years — and we're all working to make all these systems more interoperable, and your companies are focused on all these super interesting topics too, along with the questions coming in. Now, another question about the ecosystem, and specifically, since we're on the topic: how do you see the Stellar ecosystem? Do you work together? How do you collaborate within the ecosystem — not only Stellar's, but the blockchain ecosystem in general? Within Stellar, as I mentioned, we're like a pillar that + +[34:00] actually connects fiat to crypto. We work with a huge number of companies because, at the end of the day, we're the infrastructure provider that lets the other applications, centralized or decentralized, shine and capture users in the region. We work with most of the panelists: with Airtm; with Vibrant, providing the fiat ramp in Argentina, Brazil, and Mexico; and if all goes well we'll soon also be serving Cronos. We work both in the Stellar ecosystem and in other ecosystems, providing liquidity and payment-gateway services for users who want to start using crypto inside any of these applications. And within the fiat on-ramp we also have, as I mentioned, our stablecoins, ARST and BRLT, which are the + +[35:00] Argentine peso and Brazilian real tokens. They're fully open to be used by anyone in the ecosystem through the SEPs, which is the Stellar integration, so anyone who wants to can use them, and they can be a means of payment for any application that wants to operate within the Stellar ecosystem. Yes — well, as an Argentine, ARST is obviously a point of pride. For those listening, it's the Argentine peso stablecoin, and I always say it was a very big step to be among the first to issue the Argentine peso token and to start spreading it through the ecosystem. I'm a fan of ARST, I'll say that. And Milagros, + +[36:00] Vibrant works with ARST, right? Since we're on the topic, tell us a bit about ARST's role in Vibrant. Sure. In our case — also answering a bit of the question about how we work with Stellar — we work hand in hand to try to solve a very specific and important problem in Argentina, which is safeguarding value. You're probably watching us from different countries across Latin America, but as most people know, Argentina unfortunately has an unstable economy, and that carries certain consequences; one of the most notorious is the fragility of the currency, which, unfortunately, we've grown used to.
Faced with that, we Argentines tend to look for different alternatives to safeguard the value of our money. Here in Argentina, + +[37:00] one of the most common recourses is buying dollars, but with the restrictions we have, plus the risks of buying on a parallel market, Argentines started looking for other alternatives. So, thanks to the advance of blockchain, and after working alongside Stellar, a super interesting option today is buying crypto, which not only lets you safeguard value — again, our number one problem, as I see it — but also gives us access to this 100% digital economy and to new services built together with Stellar. Coming back to ARST: it's a stablecoin pegged to the value of the Argentine peso, where, by depositing pesos, inside the app you can convert those ARST to USDC and hold them right in the + +[38:00] Vibrant wallet. Great. So ARST gives you access to the dollar stablecoins — it's the bridge between fiat currency and the dollar stablecoins, which is what people really want access to. Excellent. Did you want to add a comment? Yes, on the ecosystem topic. The positive thing I see within the Stellar world and the other ecosystems we're working in is that something in particular unites us: working toward a solution that impacts everyone's daily life. On that side, a very particular camaraderie develops among us all. I know people in the ecosystem who were on other projects and are now devoted to Stellar as well, and who, for their country or their nationality, want to do something to improve this situation. + +[39:00] It always comes back to the same themes: remittances, preserving the value of your local currency, helping your family members who live in another country. I think blockchain touches a situation shared by all of us from emerging countries, and that's why this camaraderie forms, to try to help one another. On the development point the others raised, I agree that a shake-out of blockchains is coming: out of everything that exists there will be a weeding-out, like the dot-coms back when it all started, and the ones left standing will obviously be those with a very specific focus on one market, one vertical, and that are the best at it. We basically bet on Stellar because the objective is very clear: it isn't a catch-all with many + +[40:00] solutions for everyone; it's the optimization of financial matters — remittances and payments. I think that focus is what will lift the solutions we want to build toward our goal. It seems to me they're well aligned — Stellar's objectives and those of a company like Cronos, in terms of what they're aiming at. And I love hearing how everyone in the blockchain industry works together; it's something worth highlighting, because, more than anything, as entrepreneurs we all know the entrepreneur's path can be super lonely and very hard.
So having this camaraderie, this help, and this support at the end of the day seems super important to me. Totally — I think that's something that stands out in the crypto world, even among ourselves. I think + +[41:00] it comes up on every call: whenever we finish speaking on a panel, all the panelists are genuinely open to being contacted — always on Twitter, by email, or on Telegram. Any question, reach out; if we get on a call, we can refer you onward. I think from the beginning, those of us deep in the crypto world wanted to help people get in and get educated, because the risk of a lack of education is very large. So you see a lot of companies working with each other to build something bigger, and crypto entrepreneurs wanting to help and educate people who aren't in the space yet: where to go, how, and what the fastest route is. We receive many proposals from startups that want to start building in the Stellar world and don't know anything about the SEPs, don't know how to integrate, don't know what an anchor is, and honestly we invest a lot of time in taking those calls, accepting them, and walking them through + +[42:00] where to go and why — because you also have to go in with the right map — and telling them: when you reach the size and the level of volume required to use any of these products or services where we can collaborate, get back in touch with us. There are many we've really helped to understand what an anchor is, what the steps to follow would be, and whether it was or wasn't going to help with what they wanted to build. I think that's one of the most important qualities of the crypto world, and it grew out of that decentralized spirit of helping each other. Yes — I want to highlight that once more: that aspect of the industry, and of the Stellar ecosystem too, of being able to call someone, ask those questions, and have that guidance, which can save you not just a ton of time but also give you the + +[43:00] confidence of how to start, how to get into the ecosystem, and how to move forward. Totally. I always try to put myself in the place of the person asking a question: I would have loved for someone who had already lived it to tell me the steps — "I've already seen the future of this integration; tell me what it looks like and what path I have to follow" — to help people avoid confusion. I think it saves time and helps the whole ecosystem grow, and it happens to me very often. Good — and continuing with what you consider necessary to build a blockchain integration in a startup or a new company, I'd love to hear from Rubén: what advice would you give someone who is starting out?
Well, I think in the same way — [Music] or rather, aligned with the ethos of + +[44:00] decentralized technology — just as all the entrepreneurs here are quite open (maybe they won't answer you because they're busy, but surely they'd all be willing to help), all the information you need to get into this technology and adopt it is everywhere on the internet, open to everyone. That story about being able to take a fork and have a version made to measure for your needs speaks to how accessible this is: they could grab it, copy their clone, and make the changes they needed so the transactions fit their model better. Yes, + +[45:00] you can also leave a vulnerability in and lose an enormous amount, as happened there, but at the same time it speaks to how accessible it is for anyone. So now it's the entrepreneur's responsibility to look for the most reliable sources on the internet. And I think Stellar, compared with the other technologies, has made working with their stack super easy: they put everything in web2 language, with genuinely friendly tooling, so that a developer who has already worked professionally in that world can understand how to apply the technology. There are tons of resources, and everything they build to help companies is there for + +[46:00] everyone. So it's a great way to start: they basically hand you a whole framework so you can build your application easily. And I'm oversimplifying — you already know I'm not the technical one — but I see it with our team. Obviously, other ecosystems have their own languages, like Solidity and those things, but they didn't go that route; instead, in a fairly familiar way, the documentation and the infrastructure are built so that our developers — regular back-end developers — can interact with Stellar, use it, and try the technology out. So there are plenty of resources on that side. That's a really good point for developers, since + +[47:00] the first question is always "do I need to know Solidity to build?" — and here you have SDKs in several languages, so a developer who's an expert in some other language can jump in and get started easily. Super good point. Well, if we keep talking blockchain I could go on for hours, but I suspect the people listening would also like to know a bit more about all of you.
So I wanted to see whether you have a story or an anecdote — about being an entrepreneur or about the ecosystem — that you'd like to share with the people listening. I think the anecdote of recent years is like a summary of what the market looks like: how it looked + +[48:00] four years ago and how it will look in the next four. Starting out four years ago, we were lunatics: we'd call all the banks and they'd hang up on us; everyone thought we worked with drug traffickers and everyone was afraid, when in reality we were doing everything possible to be as compliant as possible. Today it's the complete opposite: we have banks calling us to figure out how they can use crypto and how they can innovate on top of crypto. I think in the next four years you'll see every bank integrate crypto one way or another, and it will become the norm. From the vantage point of four years ago we looked crazy and nobody believed us; today, if you feel like a lunatic for what you're building, maybe you're on the right path four years ahead, trying to create a product that seems very innovative today for what's + +[49:00] coming in the future. Excellent. Matías, a story to share? Yes — it's a comment I got from someone I know, more of a coffee-table chat, when I started explaining things to him, and I think it applies to everyone who at some point got started with blockchain. When you see everything that can be done — document certification, the applications it can have in finance, the whole breadth of application the financial side has, AI, and a pile of technologies to apply — well, I went in with all that junior-entrepreneur inertia: "look at all the ideas I have, I'm going to build all of this." He looked at me — he's a VC, mind you — and said, "that is exactly what you must not do." He told me: + +[50:00] "You have to find one solution, focus, and give 100% to that solution." It's not the easy choice, because with the energy and the drive you bring you say "with blockchain we're going to do a ton of things," and as an entrepreneur starting out you genuinely have a ton of ideas — "if I add this, I can have a bigger solution." So my advice is focus: put all your contacts, all your energy, 100% into one solution, and don't try to cover everything. You have to decide what not to do, and pick just a couple of things to do — not everything that could be solved. Excellent; focus is super important. Milagros, an anecdote you'd like to tell us? Sure, I'll tell a little anecdote. + +[51:00] When I got started in crypto and blockchain, working in the ecosystem, whenever I had to explain to a friend or a family member what I did, they understood nothing. I'd try to explain what blockchain was, what Vibrant did, and it was really hard for everyone to follow. But little by little, with a lot of patience, I showed each of my relatives and uncles — even my grandfather — how to buy USDC in Vibrant, which is super easy. And
now, a year later, I can hardly believe it, because my grandfather comes to me and tells me, without my saying anything, that he bought USDC on his own — which is fantastic — and my uncle came and told me he'd bought some too. Honestly, it makes me really proud that in one year I managed to convince so many relatives, and that so many people have entered the ecosystem. I think + +[52:00] that's super important, and it's an anecdote I always like to tell: in a year you can do a lot of things and bring a lot of people into the ecosystem. I find that story super interesting — how, when you work in blockchain, the people around you, your family and friends, somehow start to come in and learn about it. That seems really important to us. Rubén, a story of yours? Well, I'll offer a piece of advice instead. In Airtm's history there have been plenty of horror stories, because + +[53:00] venturing into this adventure with this new technology made life hard in a certain way: we started as pioneers, knowing where we were headed, but colliding with the traditional fiat world, which is in another place entirely. At the beginning that made everything hard: fundraising was hard, convincing people to work with us was hard — we were just starting, out evangelizing everyone, while at the same time trying to set up a parish that charged money. So it was hard. What always helped us, though, was keeping in mind that what we wanted was to help people; having such a clear vision let us keep going when things went badly. And even though things aren't easy, I've + +[54:00] realized that what separates one entrepreneur from another is that one finds, somewhere, the energy to keep pushing forward, while another at some point says "I'm tired of doing this." Having a worthy vision is what, over time, gives you the energy to keep giving everything you have, for as long as you have to give it, so that your company or venture succeeds. So that's the advice I'd give everyone: choose your missions well, because if you have a vision that moves you, that's the only thing you need — the rest, the persistence, follows. A worthy mission pulls persistence out of you; I don't know quite how it works sometimes, + +[55:00] but thank you for that advice. And once again we hear focus — and a mission, which seems super interesting to us in terms of financial inclusion. Yes, exactly: financial inclusion is a shared mission that aligns us all and gives us all the drive. And you also have a very powerful mission there in blockchain itself, which is very cool. Excellent. Thank you all for joining today. + +[56:00] [Music] I see I have an echo and I apologize, I'm not sure why. But I wanted to close by speaking to everyone and answering a question I saw in the chat, which is how to get more involved with the ecosystem. Stellar is holding an event in Mexico — not only in Mexico but also virtual — in May, which everyone can join: it runs May 20-22 and it's called Techstars Startup Weekend Blockchain Mexico City — a super long name, but I'll drop the link in the chat so you can see it, join the ecosystem, and take the leap. And we want to tell everyone that
there is no question that isn't + +[57:00] valid — reach out to us. I want to thank this panel; it's an honor to be here with you. Once again, the companies you've built are the ones we watch every day, always waiting to see how they keep growing, and we hope to hear much more from you and your companies. Thank you so much for the invitation; I had a great time, and I hope those listening on the other side took away a few tips. Once again, I'm personally completely open to helping you with whatever you need on this path — the entrepreneur's path, the path of wanting to create, grow, and build something. You can contact me through Settle's page; my Twitter is @jacquessaracco, and my messages are always open. + +[58:00] Likewise, thank you for everything. You can find me as Rubén on Twitter; please do ping me if you need me, because I can be slow to answer, but I don't want to miss the discussion. Thank you, Mercedes, and everyone for taking part in this panel — I enjoyed it a lot. I wish you a great weekend; see you around. Well, thanks to everyone from my side too. I invite you to follow us on social media and to download Vibrant and try it — lots of new things are coming to the app, so I invite you to download it; it's available on iOS and Android. And I'm at your disposal if you need anything or have any question about this use case. From my side as well, thanks to the panel — everything you're building is really impressive, and I'm very happy to be + +[59:00] here with you. A shout-out to all the entrepreneurs and to everyone who wants to get involved on this side or in anything blockchain: I'm fully available. So thank you very much, Mercedes, and to the whole team. Thank you all very much, and see you at the next Stellar event. Thank you! + +
diff --git a/meeting-notes/2024-01-18.mdx b/meeting-notes/2024-01-18.mdx deleted file mode 100644 index 0b6c16e6f2..0000000000 --- a/meeting-notes/2024-01-18.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "2024-01-18" -authors: naman -tags: [protocol] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/1196897067445010452) - -1. The need for zk-enabling encryption curves like BLS12-381. [Github thread](https://github.com/stellar/rs-soroban-env/issues/779). -1. Use cases that the ecosystem is interested in: - 1. Excellar, i.e. the folks that kicked off this conversation by submitting a [PR for BLS12-381](https://github.com/stellar/rs-soroban-env/pull/1310), wants to add a DAO-controlled oracle where the elliptic curve provides the ability to add new DAO voters - 2. Zkbricks wants to build an L2 system that enables secret state for arbitrary smart contracts - 3. Skyhitz wants to use Stellar for efficient compute, cost, and scalability while using zk to prove ownership of high-value assets on another chain - 4. Use case enumeration continues in the [discord thread](https://discord.com/channels/897514728459468821/1197663875512942653). -1. Considerations for host function implementation - 1. Core devs questioned whether BLS12-381 was the right curve and also highlighted the need to determine the correct level of abstraction, given there is a tradeoff between flexibility and efficiency. A lower level of abstraction will enable more flexibility but result in more hot loops in the wasm, while a higher level of abstraction will be highly efficient but will restrict generality. - 2. ZkBricks thought that there is a need to directly expose pairings and group operations without any level of abstraction. The space is in active development and flexibility is needed to try out new approaches and proof systems. From the point of view of crypto agility, it would be good to expose a generic interface that supports a variety of curves in the backend. -1. Path Forward - 1. Core devs mentioned crypto curves can be experimented with locally by linking Rust crates, which, it turns out, had failed in the past. This will be explored and fixed. - 2. ZkBricks and others will prototype locally and provide feedback. -1. What are the best practices for managing transactions in the frontend, with respect to transaction ordering? -1. Core devs confirmed that ordering is intentionally arbitrary. -1. Request for an API for the current version of the environment/SDK -1. Github issue filed for the RPC to return versions of the current node.
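For context on what the BLS12-381 request above unlocks: the curve operations eventually shipped as Soroban host functions (protocol 22). The sketch below shows roughly how a contract could use them for BLS signature verification. It is a minimal illustration against the `soroban-sdk` `crypto::bls12_381` module; treat the exact type and method signatures as assumptions to verify against the current SDK docs.

```rust
// Minimal sketch: BLS signature verification (signature in G1, public key
// in G2) via the BLS12-381 host functions. Names follow soroban-sdk's
// crypto::bls12_381 module; verify against the SDK before relying on them.
#![no_std]
use soroban_sdk::{
    contract, contractimpl,
    crypto::bls12_381::{G1Affine, G2Affine},
    vec, Bytes, BytesN, Env,
};

#[contract]
pub struct BlsVerify;

#[contractimpl]
impl BlsVerify {
    /// Accepts iff e(H(msg), pk) * e(sig, -G2) == 1, i.e. the usual
    /// two-pairing BLS check collapsed into a single pairing_check call.
    /// `neg_g2` is the negated G2 generator, passed in for brevity.
    pub fn verify(env: Env, msg: Bytes, sig: BytesN<96>, pk: BytesN<192>, neg_g2: BytesN<192>) -> bool {
        let bls = env.crypto().bls12_381();
        let dst = Bytes::from_slice(&env, b"EXAMPLE-SIG-DST"); // domain separation tag
        let h = bls.hash_to_g1(&msg, &dst); // H(msg) as a G1 point
        bls.pairing_check(
            vec![&env, h, G1Affine::from_bytes(sig)],
            vec![&env, G2Affine::from_bytes(pk), G2Affine::from_bytes(neg_g2)],
        )
    }
}
```

`pairing_check` returns true when the product of the pairings over the paired G1/G2 vectors is the identity, which is what lets the two-pairing signature equation above run as one host call.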
diff --git a/meeting-notes/2024-01-26.mdx b/meeting-notes/2024-01-26.mdx deleted file mode 100644 index 1156f4b2cc..0000000000 --- a/meeting-notes/2024-01-26.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -title: "2024-01-26" -authors: kalepail -tags: [developer] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/1199121845656486009) - -1. Plan and schedule for these meetings - 1. Protocol meetings every other Thursday at 4pm ET - 2. Developer meetings every other Friday at 1pm ET - 3. Will continue to adjust as needed -2. Fee bump bug - [announcement](https://stellar.org/blog/developers/fee-bump-bug-disclosure) | [discussion thread](https://discord.com/channels/897514728459468821/1200432249594707998/1200432306314281000) - 1. Fee sponsorship bug: the unused fee is refunded to the inner tx source rather than the sponsor source. - 2. Fix in new release. Up to the ecosystem and validators to upgrade. The fix will likely be rolled out before Phase 2. - 3. Up to validators to determine if they'd like to push the v20 upgrade date to wait for the fix, or upgrade with the current release. -3. TxMeta Deprecation in Horizon - [announcement](https://discord.com/channels/897514728459468821/900374272751591424/1199438109796999298) -4. Ideas around testing against ledger snapshots - [request](https://discord.com/channels/897514728459468821/1199121845656486009/1199158421254049912) - 1. Define the needs a bit more clearly - 2. Definitely something here we should be addressing to make testing against specific ledger state easier -5. How do you get a list of smart contracts? - [thread](https://discord.com/channels/897514728459468821/1199121845656486009/1199739331078803496) - 1. Observe create contract ops as ledgers close - 2. Use an [indexing service](/docs/data/indexers) -6. What is the status of contracts caching support? - [question](https://discord.com/channels/897514728459468821/1199121845656486009/1200484710447587490) - 1. [response](https://discord.com/channels/897514728459468821/1199121845656486009/1200516877680644276)
diff --git a/meeting-notes/2024-02-01.mdx b/meeting-notes/2024-02-01.mdx deleted file mode 100644 index f581603ba8..0000000000 --- a/meeting-notes/2024-02-01.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: "2024-02-01" -authors: naman -tags: [protocol] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/1201979721211203614) - -1. The proposal is to advance stellar-core by adding a host function to verify secp256r1 signatures; secp256r1 is the most common elliptic curve used outside of the blockchain space. It is useful in connecting off-chain authentication interfaces with on-chain functionality. -1. Note that the proposal is not for a new signer type but a host function. -1. Leigh investigated adding support for the WebAuthN use case, by allowing a custom account / smart contract to sign soroban auth entries using a secp256r1-signed payload. -1. secp256r1 is supported by phones and passkeys, and enables an app to replace passwords. This is a massive benefit to user-facing applications like wallets. -1. Pros and cons of the interface: blockchains generally implement the recovery interface over the verification interface, but verification is easier for developers as it reduces the burden on the client and the network. -1. The WebAuthN use case requires encoding and decoding of base64 payloads and decoding JSON blobs, which is not currently supported in Soroban. -1. While there are hacky ways of accomplishing the latter, it's not a great developer experience and the final implementation is susceptible to breakages on updates. -1. It is also costly to bundle decoding with verification in the guest. -1. Soroban has always led with a batteries-included mindset. Keeping in line with that approach, it makes sense to further investigate and determine whether a host function makes sense for these as well. -1. Leigh's implementation may require further evaluation of the crates used for ecdsa and p256. -1. Brief discussion around the proposed process for adding a host function by a non-core dev.
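The secp256r1 host function discussed above later shipped (protocol 21). As a rough sketch of the developer-facing shape, assuming the `soroban-sdk` >= 21 API (verify names against the SDK docs):

```rust
// Minimal sketch of calling the secp256r1 verification host function.
#![no_std]
use soroban_sdk::{contract, contractimpl, Bytes, BytesN, Env};

#[contract]
pub struct P256Example;

#[contractimpl]
impl P256Example {
    /// Traps if `sig` is not a valid ECDSA secp256r1 signature by `key`
    /// (a 65-byte uncompressed SEC-1 public key) over sha256(msg).
    pub fn check(env: Env, key: BytesN<65>, msg: Bytes, sig: BytesN<64>) {
        let digest = env.crypto().sha256(&msg); // Hash<32>
        env.crypto().secp256r1_verify(&key, &digest, &sig);
    }
}
```

Note that, per the notes above, the host function verifies a signature over a 32-byte digest only; the WebAuthN flow additionally needs the base64/JSON handling of the client data, which is the part the notes flag as awkward to do in the guest.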
diff --git a/meeting-notes/2024-02-09.mdx b/meeting-notes/2024-02-09.mdx deleted file mode 100644 index e26bb90381..0000000000 --- a/meeting-notes/2024-02-09.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "2024-02-09" -authors: kalepail -tags: [developer] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/1204462856037470248) - -1. Stellar Asset List (SEP-0042) draft presentation by [OrbitLens](https://github.com/orbitlens) - 1. [SEP draft](https://github.com/orbitlens/stellar-protocol/blob/sep-0042-token-lists/ecosystem/sep-0042.md) - 2. [Discord discussion](https://discord.com/channels/897514728459468821/1162558946867953704) -2. Stellar + Soroban documentation survey - 1. [Take the survey](https://discord.com/channels/897514728459468821/1204462856037470248/1205196745877757962)
diff --git a/meeting-notes/2024-02-22.mdx b/meeting-notes/2024-02-22.mdx deleted file mode 100644 index 3883461b47..0000000000 --- a/meeting-notes/2024-02-22.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "2024-02-22" -authors: kalepail -tags: [developer] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/1209582245824823337) - -1. Latest and greatest on the TypeScript bindings with [@chadoh](https://github.com/chadoh) -2. [Available RPC providers](/docs/data/apis/rpc/providers) -3. Standing up a [`stellar-rpc` docker container](/docs/data/apis/rpc/admin-guide#docker-image) -4. Installing and invoking a Stellar Asset Contract on mainnet in Phase 0
diff --git a/meeting-notes/2024-02-29.mdx b/meeting-notes/2024-02-29.mdx deleted file mode 100644 index be09cdc8a0..0000000000 --- a/meeting-notes/2024-02-29.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "2024-02-29" -authors: naman -tags: [protocol] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/1212118102565855243) - -1. Tommaso (@tdep) proposed a core change to allow extending instance and code TTL with separate values on the host environment, to allow for more cost-efficient designs - a. The proposal and discussion are captured in Github Discussions [stellar-core#1447](https://github.com/stellar/stellar-protocol/discussions/1447) -2. Tommaso received feedback on the proposal as well as the implementation. Since it didn't require a metering change, core devs thought it would be a quick change. -3. The ecosystem voted in favor of the proposal by upvoting the post on Github Discussions. 13 votes were [recorded](https://github.com/stellar/stellar-protocol/discussions/). -4. As next steps, a CAP will be authored to capture the proposal and put forth for approval from the CAP Core Team.
diff --git a/meeting-notes/2024-03-07.mdx b/meeting-notes/2024-03-07.mdx deleted file mode 100644 index 693e1fcdf7..0000000000 --- a/meeting-notes/2024-03-07.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "2024-03-07" -authors: kalepail -tags: [developer] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/911254664576643122/1215404506964172890) - -1. [Sorobill tool](https://github.com/kalepail/sorobill) -2. Deploying contracts and testing invocations against the unlimited [quickstart](https://github.com/stellar/quickstart). -3. Using the Sorobill package as a tool to get a snapshot of all the limits the contract is touching. [Relevant blog post](https://kalepail.com/blockchain/show-me-the-bill-part-2) -4. How to measure contract costs and which limits you are touching -5. Utilizing the Sorobill tool to decode XDR and understand failed transactions.
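The separate instance/code TTL extension proposed by Tommaso in the 2024-02-29 notes above (later CAP-53) surfaces in the SDK roughly as below. This is a minimal sketch: the `extend_ttl_for_contract_instance` / `extend_ttl_for_code` method names and the ledger arithmetic (assuming ~17,280 ledgers/day at ~5s per ledger) are assumptions to check against the current `soroban-sdk`.

```rust
// Minimal sketch: extending instance and code TTLs with separate values.
#![no_std]
use soroban_sdk::{contract, contractimpl, Address, Env};

const WEEK: u32 = 120_960;         // ~7 days of ledgers (illustrative)
const TWO_MONTHS: u32 = 1_036_800; // ~60 days
const SIX_MONTHS: u32 = 3_110_400; // ~180 days

#[contract]
pub struct TtlBumper;

#[contractimpl]
impl TtlBumper {
    pub fn bump(env: Env, target: Address) {
        // If the instance entry would expire within ~a week, bump it to ~2 months.
        env.deployer()
            .extend_ttl_for_contract_instance(target.clone(), WEEK, TWO_MONTHS);
        // Code is shared by every deployment of the same Wasm, so it can be
        // worth paying to keep it alive much longer than any one instance.
        env.deployer().extend_ttl_for_code(target, WEEK, SIX_MONTHS);
    }
}
```

Splitting the two values is the cost optimization the proposal is after: the widely shared code entry can be bumped far into the future without paying to bump every instance the same distance.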
diff --git a/meeting-notes/2024-03-14.mdx b/meeting-notes/2024-03-14.mdx deleted file mode 100644 index d5894a8414..0000000000 --- a/meeting-notes/2024-03-14.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "2024-03-14" -authors: naman -tags: [protocol] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/1217193723612368926) - -1. The CAP Core Team deliberated over the latest proposals put forth by the Stellar ecosystem to advance stellar-core. -2. Nicholas and David from the CAP Core Team listened to the following proposals and discussed them with the authors. - a. The CAP Core Team will deliver their vote over email. -3. Proposals discussed: - a. [CAP-51](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0051.md): add support for secp256r1 verification; by @leigh - b. [CAP-53](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0053.md): create separate functions for extending the time-to-live for contract instance and contract code; by @tdep - c. [CAP-54](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0054.md): lower total costs by refining the Soroban cost model used for VM instantiation into multiple separate and more-accurate costs; by @graydon - d. [CAP-55](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0055.md): lower total costs by linking fewer host functions during VM instantiation in Soroban; by @graydon - e. [CAP-56](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0056.md): lower total costs by caching parsed Wasm modules within a Soroban transaction; by @graydon
diff --git a/meeting-notes/2024-03-21.mdx b/meeting-notes/2024-03-21.mdx deleted file mode 100644 index 7fea7f2580..0000000000 --- a/meeting-notes/2024-03-21.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "2024-03-21" -authors: kalepail -tags: [developer] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/1219381314931917000) - -1. There's a discussion on a TX meta change increasing visibility and analytics within the Stellar network. (https://github.com/stellar/stellar-xdr/pull/175) -2. Read-only invocations for contracts are explained, focusing on ensuring certain functions remain read-only without side effects. (https://github.com/stellar/stellar-protocol/discussions/1454) (https://github.com/stellar/stellar-protocol/discussions/1456) (https://github.com/stellar/stellar-protocol/discussions/1464) -3. Enabling contract discovery is introduced to enhance the visibility and authenticity of smart contracts within the Stellar ecosystem. -4. The implementation of a standardized contract metadata schema is proposed to link contracts to source code and enhance contract discoverability. (https://docs.rs/soroban-sdk/latest/soroban_sdk/macro.contractmeta.html) -5. Issues related to inclusion fees in the Stellar network, highlighting the importance of monitoring fees closely and understanding the implications of [surge pricing](/docs/learn/fundamentals/fees-resource-limits-metering#inclusion-fee) -6. Plans are discussed for designing a new RPC endpoint to provide developers with better visibility and information on setting inclusion fees, aiming to improve transparency and decision-making regarding fee trade-offs.
diff --git a/meeting-notes/2024-03-28.mdx b/meeting-notes/2024-03-28.mdx deleted file mode 100644 index 32dadf84e7..0000000000 --- a/meeting-notes/2024-03-28.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "2024-03-28" -authors: naman -tags: [protocol] ---- - - - -[Agenda thread](https://github.com/stellar/stellar-protocol/discussions/1475) - -1. The Standards Working Group proposed changes to the SEP process that empower the ecosystem by making the current process more decentralized and ecosystem-friendly. -2. The process has already been used for several proposals over the last three months. -3. Esteblock from Soroswap shared their journey of participating in the proposal for the Asset List ([SEP-42](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0042.md)) and implementing the proposed standard. -4. Discussion continues in the proposal doc. -5. The next step is to get further ecosystem feedback, then update the Github SEP repository with the updated SEP process.
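The contract metadata schema mentioned in the 2024-03-21 notes above builds on the `contractmeta!` macro linked there, which embeds key/value pairs into the compiled Wasm. A minimal sketch follows; the repository URL and key names are hypothetical, chosen for illustration.

```rust
// Minimal sketch: embedding discoverability metadata with contractmeta!.
#![no_std]
use soroban_sdk::{contract, contractimpl, contractmeta, Env, Symbol};

// Each entry lands in a custom section of the Wasm, where indexers and
// explorers can read it to link the deployed contract to its source.
contractmeta!(key = "source_repo", val = "github.com/example-org/example-contract");
contractmeta!(key = "Description", val = "Example contract with discoverability metadata");

#[contract]
pub struct MetaExample;

#[contractimpl]
impl MetaExample {
    pub fn hello(env: Env) -> Symbol {
        Symbol::new(&env, "hello")
    }
}
```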
diff --git a/meeting-notes/2024-04-04.mdx b/meeting-notes/2024-04-04.mdx deleted file mode 100644 index d9310c7d72..0000000000 --- a/meeting-notes/2024-04-04.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: "2024-04-04" -authors: naman -tags: [developer] ---- - -Today's recording has two parts. The first 12 minutes are audio-only. The next 45 minutes have video as well. Please note the slides were shared in discord chat rather than over screensharing, due to technical difficulties. - -Part 1 (audio-only): - - - -Part 2 (video): - - - -[Discord Agenda thread](https://discord.com/channels/897514728459468821/1224408179363024918) - -1. Piyal surfaced the proposal for a [Wallet Standard](https://github.com/stellar/stellar-protocol/discussions/1467) and requested feedback. -2. Cubist, an ecosystem project, discussed CubeSigner, a low-latency API for generating keys and signing transactions inside secure hardware. -3. A Stellar-based example of CubeSigner is available in the [Cubist Labs Github repository](https://github.com/cubist-labs/CubeSigner-TypeScript-SDK/tree/main/examples/stellar) -4. Cubist devs can be contacted via the Stellar discord or the [web form](https://cubist.dev/contact-form-cubesigner-hardware-backed-key-management).
diff --git a/meeting-notes/2024-04-11.mdx b/meeting-notes/2024-04-11.mdx deleted file mode 100644 index d8e5ff05e4..0000000000 --- a/meeting-notes/2024-04-11.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "2024-04-11" -authors: naman -tags: [protocol] ---- - - - -Piyal from Freighter discussed the proposal to standardize the wallet interface. Key points from the discussion are captured below. For full notes, please view the recording, and also refer to the proposal and the post on github discussions. - -1. [The draft proposal](https://github.com/stellar/stellar-protocol/blob/83191be659166e05f8df1257c6f655de9d1afe63/ecosystem/sep-0043.md) -2. [Ongoing discussion](https://github.com/stellar/stellar-protocol/discussions/1467) -3. Requiring the network passphrase might be unneeded complexity. -4. Both WalletConnect and mobile wallets likely have significant differences from the proposed interface (which is targeted at browser extension wallets), and thus likely require a separate SEP. Those SEPs should be created by the teams working on wallet integration with those platforms. -5. What is the role of the Stellar Wallet Kit in the ecosystem and how does it play with the standard itself? -6. As next steps, Piyal will incorporate the suggestions from the ecosystem into the proposal.
diff --git a/meeting-notes/2024-04-18.mdx b/meeting-notes/2024-04-18.mdx deleted file mode 100644 index 83f671c5dc..0000000000 --- a/meeting-notes/2024-04-18.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "2024-04-18" -authors: kalepail -tags: [developer] ---- - - - -[Discord agenda thread](https://discord.com/channels/897514728459468821/911254664576643122/1215404506964172890) - -1. Justin from [ortege.ai](https://www.ortege.ai/) demo'd Ortege, a data analytics platform for Stellar and Soroban. -2. Ortege lets anyone in the Stellar ecosystem create dashboards to track any and all desired metrics. Ortege's queries, widgets, and dashboards are shareable, making it the perfect platform to surface insights. -3. Justin will be releasing an AI feature soon, enabling querying and insights via natural language. -4. All are invited to create a free account and track the success metrics for their dashboard.
diff --git a/meeting-notes/2024-04-25.mdx b/meeting-notes/2024-04-25.mdx deleted file mode 100644 index 0d8394d5be..0000000000 --- a/meeting-notes/2024-04-25.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "2024-04-25" -authors: naman -tags: [protocol] ---- - - - -1. Garand discussed changes to the State Archival proposal based on feedback received at Meridian 2023. The proposed changes are: - -- Previously, a downstream system called the ESS (Expired State Store) would store expired entries. In the new proposal, there is no ESS. All archived entries, as well as all information required to generate restoration proofs for those entries, are stored directly in the History Archive. -- RPC nodes can generate proofs for archived state during preflight -- Captive-core can be directly queried for archived state, meaning that RPC/Horizon instances can potentially service queries for archival state - -2. [The draft proposal](https://docs.google.com/document/d/1FAs3Yfo-o-gVqccrP29NSG8ysRvdEoyvuL7ywV4ijXI/edit#heading=h.1xwsoyifxbfm) -3. [Ongoing discussion](https://github.com/stellar/stellar-protocol/discussions/1480) -4. Snapshot size is TBD; it's a function of bucket list size as well as the memory and historic demands placed on the RPC. -5. Bloom filters are the likely solution for proofs of non-existence, though they come with trade-offs. They enable fast and cheap lookup but are probabilistic, not deterministic. -6. Further comments are welcome.
diff --git a/meeting-notes/2024-05-02.mdx b/meeting-notes/2024-05-02.mdx deleted file mode 100644 index a595895d24..0000000000 --- a/meeting-notes/2024-05-02.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: "2024-05-02" -authors: naman -tags: [developer] ---- - - - -[Discord Agenda thread](https://discord.com/channels/897514728459468821/1234887262530048010/1234887262530048010) - -1. Fifo presented [Stellar Plus](https://docs.cheesecakelabs.com/stellar-plus), a JavaScript library that simplifies Stellar and Soroban development. -2. Ecosystem members found the design of Stellar Plus composable and encompassing of all Stellar-related functionality, including management of assets, accounts, wasm-related operations, as well as RPC utils. -3. The links to Fifo's presentation and Stellar Plus are: - - [Miro Board showing Stellar Plus architecture](https://miro.com/app/board/uXjVKMDkMPI=/?share_link_id=643609701897) - - [Stellar Plus Repository](https://discord.com/channels/897514728459468821/1234887262530048010/1235699608274079865) - - [Examples Repository](https://github.com/fazzatti/stellar-plus-examples) - - [Docs](https://docs.cheesecakelabs.com/stellar-plus)
diff --git a/meeting-notes/2024-05-09.mdx b/meeting-notes/2024-05-09.mdx deleted file mode 100644 index 12556769d8..0000000000 --- a/meeting-notes/2024-05-09.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: "2024-05-09" -authors: naman -tags: [developer] ---- - - - -1. Tyler built a voting application using passkeys to sign the transaction, which is an implementation of the secp256r1 verification function. -2. He showed a cross-platform implementation (web and mobile) and demonstrated that passkeys are the perfect interface between web3 contracts and the web2 authentication mechanisms that most end users are accustomed to. -3. Ecosystem members discussed the use of smart wallets that would use passkeys as a signer. Challenges were identified around the fees required for smart wallets, the need for a common smart wallet implementation, as well as how it might interface with existing password managers. -4. The voting application can be tried out at [passkey.sorobanbyexample.org/](https://passkey.sorobanbyexample.org/) -5. Code for the demo is here [github.com/kalepail/soroban-passkey](https://github.com/kalepail/soroban-passkey)
diff --git a/meeting-notes/2024-06-13.mdx b/meeting-notes/2024-06-13.mdx deleted file mode 100644 index 9dafb9aa82..0000000000 --- a/meeting-notes/2024-06-13.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: "2024-06-13" -authors: kalepail -tags: [developer] ---- - - - -1. Tyler created Super Peach, a web3 application that uses passkeys to sign transactions. He demonstrated how passkeys can be used in authorization flows and how they can be used to sign transactions. - - Code: https://github.com/kalepail/superpeach - - Demo: https://superpeach.xyz -2. Introduced `passkey-kit`, a TypeScript SDK for creating and managing Smart Wallets via passkeys (includes the actual [Smart Wallet interface](https://github.com/kalepail/passkey-kit/tree/main/contracts)) - - Code: https://github.com/kalepail/passkey-kit - - Demo: https://passkey-kit-demo.pages.dev -3. Introduced Launchtube, a service for submitting transactions onchain by covering both the transaction fee AND the sequence number. Wild! - - Code: https://github.com/kalepail/launchtube (ask in the `#passkeys` channel on Discord for a testnet token) -4. He shared his vision for pushing the passkey implementation through to becoming a [standard for the ecosystem](https://docs.google.com/document/d/1c_Wom6eK1UpC3E7VuQZfOBCLc2d5lvqAhMN7VPieMBQ/edit). - -Join the `#passkeys` channel on Discord to continue the discussion
diff --git a/meeting-notes/2024-06-20.mdx b/meeting-notes/2024-06-20.mdx deleted file mode 100644 index 48ba433125..0000000000 --- a/meeting-notes/2024-06-20.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: "2024-06-20" -authors: naman -tags: [developer] ---- - - - -1. [Kirckz discusses Meru](https://docs.google.com/presentation/d/1Fu4AkB0mrvOkK6UDFJHgKwCV-Ul4JRF-xPqTYJ3CQqw), a financial services app for freelancers and remote workers in Latin America. -2. He shares his experience integrating Meru with Blend, a liquidity protocol primitive for Stellar. -3. Kirckz shares the challenges faced during the integration and how they were overcome. -4. He shares the SDKs and libraries his team used to facilitate the integration. - -Follow Meru on X (formerly Twitter) to stay updated: https://x.com/getmeru
diff --git a/meeting-notes/2024-06-27.mdx b/meeting-notes/2024-06-27.mdx deleted file mode 100644 index f98ddae9c5..0000000000 --- a/meeting-notes/2024-06-27.mdx +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: "2024-06-27" -authors: naman -tags: [developer] ---- - - - -1. [Chad](https://github.com/chadoh) and [Willem](https://github.com/willemneal) from [Aha Labs](https://github.com/AhaLabs) discuss the updates to the new and improved [stellar-cli](https://github.com/stellar/stellar-cli) -2. Some highlights include the change from the name 'soroban' to 'stellar' when using the CLI tool and the addition of new commands. -3. They cover some cool functions like local network setup, account creation, and transaction signing, with the following commands: - - `stellar network container [start|logs]` - - `stellar keys [generate|fund|ls]` - - `stellar contract init` to get a whole new Soroban project - - and so much more! -4. They also discuss expected future updates, including support for more Stellar operations and integration with Stellar Lab V2!
diff --git a/meeting-notes/2024-07-11.mdx b/meeting-notes/2024-07-11.mdx deleted file mode 100644 index 3d2fda2bf6..0000000000 --- a/meeting-notes/2024-07-11.mdx +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: "2024-07-11" -authors: naman -tags: [developer] ---- - - - -1. The SDF Data team gave a crash course in the analysis of Stellar data, and covered how to access Hubble, tips for efficient querying, and how to get started with data exploration. -2. [Slides](https://docs.google.com/presentation/d/1QsCwFLFcDF4RmNIwtSSnNrUfZb0RM0kLxOOxC7ENY5M/edit#slide=id.g2cb5821e4de_1_1143) are publicly available and can be reviewed async. -3. Tips for data analysis are also covered in the [docs](/docs/data/analytics/hubble/analyst-guide) -4. Share your queries and post questions in #hubble in the Stellar discord, which is a dedicated channel for data-related topics.
diff --git a/meeting-notes/2024-07-18.mdx b/meeting-notes/2024-07-18.mdx deleted file mode 100644 index 7ca93b90e5..0000000000 --- a/meeting-notes/2024-07-18.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: "2024-07-18" -authors: naman -tags: [developer] ---- - - - -Note: the first part of the call was lost. The video posted above captures the second half of the call, where various ecosystem developers shared their use cases and needs for a smart wallet on Stellar. - -1. Tyler put forward a [proposal](https://github.com/stellar/stellar-protocol/discussions/1499) for a smart wallet as a public good. Given that the native auth can be overloaded by using `__check_auth`, a Stellar implementation of a smart wallet is fairly straightforward (see the sketch after these notes). The capability to customize the auth is already built into the core protocol. -2. See the [proposal](https://github.com/stellar/stellar-protocol/discussions/1499) here and the implementation [here](https://github.com/kalepail/passkey-kit/blob/main/contracts/webauthn-wallet/src/lib.rs) -3. The proposal only uses WebAuthN-based signers, i.e. passkeys. It does not use ed25519, which perhaps it should, given that ~100% of the accounts on Stellar use that scheme. It also introduces the notion of temporary and admin signers to illustrate that the account can be managed by multiple signers, each with a different access policy. -4. The biggest unlock with custom auth is the ability to execute custom logic. We heard from various ecosystem members about how they might use it. 4a. A dev is building a perpetual protocol and thought smart wallets could be used to automatically manage defi positions, which would be a significant improvement over the status quo, where the user has to constantly track assets to determine _when_ to execute a trade. 4b. Folks are excited about foregoing the seed phrase for passkeys, which is especially meaningful when onboarding net new users to the blockchain. 4c. Authorizing a cross-chain message from a different chain, especially programmatic authorization, requires an implementation of custom accounts. 4d. Some apps have noted that users prefer not to have a wallet but to simply experience the value of the app, especially games. For this, the app may assign a temporary account to the user and control access via check_auth. 4e. Microtransactions without needing a user signature are super interesting for apps as well. -5. This has been a very insightful meeting and we learned how the Stellar ecosystem plans to leverage smart wallets. Let's continue the conversation in discord!
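The `__check_auth` overloading that the smart wallet proposal above relies on looks roughly like the following minimal custom account. This is a bare sketch against `soroban_sdk::auth::CustomAccountInterface` — a single stored ed25519 signer and no policies, not the proposal's WebAuthN wallet; treat the details as assumptions and see the linked implementation for the real design.

```rust
// Bare-bones custom account: one stored ed25519 signer, no policies.
#![no_std]
use soroban_sdk::{
    auth::{Context, CustomAccountInterface},
    contract, contracterror, contractimpl, crypto::Hash,
    symbol_short, Bytes, BytesN, Env, Vec,
};

#[contract]
pub struct MiniAccount;

#[contracterror]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum Error {
    NoOwner = 1,
}

#[contractimpl]
impl MiniAccount {
    /// One-time setup storing the single authorized ed25519 public key.
    pub fn init(env: Env, owner: BytesN<32>) {
        env.storage().instance().set(&symbol_short!("owner"), &owner);
    }
}

#[contractimpl]
impl CustomAccountInterface for MiniAccount {
    type Signature = BytesN<64>;
    type Error = Error;

    /// Invoked by the host every time this account authorizes something.
    /// Custom logic (spend limits, multisig, passkeys, ...) goes here.
    fn __check_auth(
        env: Env,
        signature_payload: Hash<32>,
        signature: BytesN<64>,
        _auth_contexts: Vec<Context>,
    ) -> Result<(), Error> {
        let owner: BytesN<32> = env
            .storage()
            .instance()
            .get(&symbol_short!("owner"))
            .ok_or(Error::NoOwner)?;
        let payload: Bytes = signature_payload.to_bytes().into();
        // Traps if the signature over the payload does not verify.
        env.crypto().ed25519_verify(&owner, &payload, &signature);
        Ok(())
    }
}
```

Swapping the `ed25519_verify` call for `secp256r1_verify` over a WebAuthN payload, plus per-signer policies keyed off `_auth_contexts`, is essentially the shape of the proposal's passkey wallet.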
4e. Microtransactions without needing a user signature are super interesting for apps as well.
-5. This has been a very insightful meeting and we learned about how the Stellar ecosystem plans to leverage smart wallets. Let's continue the conversation in Discord!
diff --git a/meeting-notes/2024-08-01.mdx b/meeting-notes/2024-08-01.mdx
deleted file mode 100644
index 0c2f914ca0..0000000000
--- a/meeting-notes/2024-08-01.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: "2024-08-01"
-authors: naman
-tags: [developer]
----
-
-1. Piyal demonstrated that Freighter's swap functionality is now served by [Soroswap](https://soroswap.finance/). It was previously served by the Stellar DEX.
-2. Freighter has made integration instructions available [here](https://github.com/stellar/freighter/blob/d248f2ad0aa03da72ea6eeaf7907ac0454fdcc72/extension/INTEGRATING_SOROSWAP.MD?plain=1#L2).
-3. Esteban shared that [Palta Labs](https://paltalabs.io/) has created a DEX aggregator and made it available to all via the [Router SDK](https://docs.soroswap.finance/03-technical-reference/07-optimal-route/01-soroswap-router-sdk).
-4. The Router SDK finds the optimal path for the swap in terms of swap cost across all DEXes on Soroban.
diff --git a/meeting-notes/2024-08-08.mdx b/meeting-notes/2024-08-08.mdx
deleted file mode 100644
index f4c12dd10a..0000000000
--- a/meeting-notes/2024-08-08.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: "2024-08-12"
-authors: naman
-tags: [developer]
----
-
-1. Tdep discussed Zephyr, an execution environment built on top of the indexer Mercury. He also walked through examples demonstrating how Zephyr can simplify dapp development.
diff --git a/meeting-notes/2024-08-15.mdx b/meeting-notes/2024-08-15.mdx
deleted file mode 100644
index 0c3bf6b5ce..0000000000
--- a/meeting-notes/2024-08-15.mdx
+++ /dev/null
@@ -1,23 +0,0 @@
----
-title: "2024-08-15"
-authors: julian
-tags: [developer]
----
-
-1. @Soiled and @Robeart from Orbit spoke about using Blend to create decentralized stablecoins for all currencies under the Orbit Protocol, utilizing a decentralized pegkeeper to maintain their price, and leveraging these stablecoins and smart wallets to create an orderbook-less perpetual exchange, bringing forex to Stellar
-
-2. [Link to the presentation](https://docs.google.com/presentation/d/1mDOrBLfe8-Bq6VCy7r5bb4w_uZjq-EOorbV3ZwYfs1k/edit?usp=sharing)
-
-_Note_: The host's microphone audio is not in the video, so there is some silence during Q/A. Here are the questions asked during the Q/A:
-
-1. (From ! markus_0) Why do you always have an infinite amount of tokens in the pool? Wouldn't it be safer to start small and mint more as demand opens up?
-2. (From HunterIonize) What purpose does this serve exactly? Sorry to be blunt.
-3. How do you see the Orbit Protocol contributing to financial inclusion on a global scale, particularly in underbanked regions? What challenges do you anticipate in achieving this?
-4. In 5-10 years, how do you see the landscape of Forex on blockchain evolving? What role do you believe Stellar will play in this evolution, and how will Blend and Orbit Protocol be at the forefront?
-5. Are there any asks of the developer community?
diff --git a/meeting-notes/2024-08-22.mdx b/meeting-notes/2024-08-22.mdx
deleted file mode 100644
index 32034c9ea3..0000000000
--- a/meeting-notes/2024-08-22.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: "2024-08-23"
-authors: naman
-tags: [protocol]
----
-
-[Discord agenda thread](https://discord.com/channels/897514728459468821/900374272751591424/1275577430043525204)
-
-Core Developers discussed the latest proposals to advance Stellar Core in this week's Protocol Meeting.
-
-1. The proposal for the addition of a constructor to Soroban’s flavor of Rust was introduced in a previous protocol meeting ([previous meeting](./2024-07-25.mdx)), documented in [CAP-58](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0058.md). A constructor is a function that will only be executed the first time the contract is created.
-2. In this meeting, Dima discussed the updates made since the last meeting:
-   1. Default constructor - if a constructor is not defined explicitly, the contract is treated as if it has a constructor
-   2. Semantics of the return value - if the transaction succeeds, it is required to return a valid value
-   3. Constructor interaction with custom accounts - custom accounts must be aware of the context that they are authorizing.
-3. Graydon discussed the upgrade to the Wasmi virtual machine, documented in [CAP-60](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0060.md). Wasmi works by translating WebAssembly code to an internal representation (IR) and then executing it. The upgrade is impactful in two ways.
-   1. Translating from WebAssembly to IR takes longer, but the execution of the resulting IR is performant.
-   2. The upgrade introduces lazy compilation. Of all functions in a contract, only the ones that are called in a given transaction will be translated, thus reducing both latency and fees.
-4. Jay discussed the addition of the BLS12-381 cryptographic curve, documented in [CAP-59](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0059.md).
-   1. Addition of pairing-friendly elliptic curves enables zk-based applications. 11 host functions have been added to expose mapping, pairing, and arithmetic on the BLS12-381 curve.
-   2. An example case of BLS signature verification was presented. It consumed 26M instructions (running natively), which is promising given the per-transaction limit is 100M.
-   3. There was general agreement that the interface is the right one as it allows a contract developer to implement a wide variety of use cases. Discussion continues in Discord.
-   4. Jay requested that developers build applications against the functions and give feedback.
diff --git a/meeting-notes/2024-08-29.mdx b/meeting-notes/2024-08-29.mdx
deleted file mode 100644
index 257d09aa91..0000000000
--- a/meeting-notes/2024-08-29.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: "2024-08-29"
-authors: naman
-tags: [protocol]
----
-
-Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1278045556211716171)
-
-CAP Core team deliberated on the proposed CAPs:
-
-1. Addition of a constructor to Soroban's flavor of Rust. [CAP](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0058.md)
-   1. The team's concern was about a potential break in compatibility, which Dima had addressed. There were no further concerns.
-2. Addition of the BLS12-381 curve and required field arithmetic - [CAP](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0059.md)
-   1. The team's concern was about providing functions to check invalid input. It's too computationally expensive to do the check at the contract layer, so it may need to be implemented as a host function. Jay is seeking ecosystem input around use cases that require strict input validation.
-   2. There were no further concerns.
-3. Increase performance by upgrading Soroban's VM. [CAP Discussion](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0060.md)
-   1. The team's comments were about the accuracy of the measurement method, but the demonstrated benefits in wall clock time were thought to be promising.
-   2. There was a suggestion to expose performance improvements to contract developers, thus creating the incentive to optimize contracts to leverage the improvements.
-   3. There were no further concerns.
diff --git a/meeting-notes/2024-09-05.mdx b/meeting-notes/2024-09-05.mdx
deleted file mode 100644
index 3158556ecb..0000000000
--- a/meeting-notes/2024-09-05.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: "2024-09-05"
-authors: anataliocs
-tags: [developer]
----
-
-Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1280678171053789317)
-
-Platform team demonstrated Galexie, a part of CDP (Composable Data Platform):
-
-1. Galexie
-   1. Data Extraction: Extracts raw ledger data from the Stellar network
-   2. Compression: Compresses raw data for efficient storage
-   3. Storage Options: Supports runtime configuration through the Datastore abstraction to use various physical storage layers, starting with Google Cloud Storage (GCS)
-   4. Modes of Operation: Can operate in either batch mode or streaming mode
-2. Composable Data Platform
-   1. Flexible Datastore: Multiple options for physical data storage layers
-   2. Galexie: Used to extract, compress and export data to your chosen Datastore
-   3. Transform: Structure data in a model suitable to your application
-3. Pluggable Data Pipelines
-   1. Workflows: Create ETL (extract, transform, load) pipelines
-   2. Streaming: Fast, lightweight streaming data
diff --git a/meeting-notes/2024-09-12.mdx b/meeting-notes/2024-09-12.mdx
deleted file mode 100644
index 7d7ab794ae..0000000000
--- a/meeting-notes/2024-09-12.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: "2024-09-12"
-authors: carstenjacobsen
-tags: [developer]
----
-
-Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1282934024892973077)
-
-Developer Experience team member Nando Vieira introduced the CLI features alias and snapshot:
-
-1. Alias
-   1. Install of Hello World example for showcasing alias
-   2. Showed examples of how contract IDs are often passed as parameters in CLI commands like invoke (copying ID string or command substitution)
-   3. How to deploy a smart contract and create an alias
-   4. How to invoke the smart contract with the alias
-2. Snapshot
-   1. How to create a ledger snapshot
-   2. How to use the snapshot in a test case
-
-Towards the end Nando went through the developer documentation, with a focus on the added command line examples for Windows users, and a useful cookbook for CLI commands.
diff --git a/meeting-notes/2024-09-19.mdx b/meeting-notes/2024-09-19.mdx
deleted file mode 100644
index a50cd29df4..0000000000
--- a/meeting-notes/2024-09-19.mdx
+++ /dev/null
@@ -1,25 +0,0 @@
----
-title: "2024-09-19"
-authors: carstenjacobsen
-tags: [developer]
----
-
-Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1285627254130610297)
-
-SDF DevRel team member Carsten Jacobsen showed how to build a simple Hello World dapp based on a Soroban smart contract and Next.js through these steps:
-
-1. Create the default Hello World smart contract using the Stellar CLI
-2. Create TypeScript bindings (package) using the Stellar CLI
-3. Create the default Next.js app using the npx create-next-app command
-4. Add and link the TypeScript binding package to the Next.js project
-5. Create a simple frontend with a form to submit a string
-6. Import the package in the Next.js page, and set up a client
-7. Create a submit function to send the form value to the smart contract
-8. Use useState to store the smart contract response and display it
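To make step 6 of the walkthrough concrete, here is a minimal TypeScript sketch of importing a generated bindings package and calling the contract. The package name `hello-world` and the RPC URL are placeholders, and the exact shape of the generated client can vary between Stellar CLI versions.

```typescript
// Assumes `stellar contract bindings typescript` produced a package linked
// into the Next.js project under the name "hello-world" (illustrative).
import * as HelloWorld from "hello-world";

const client = new HelloWorld.Client({
  ...HelloWorld.networks.testnet, // contract ID + network passphrase captured at generation time
  rpcUrl: "https://soroban-testnet.stellar.org", // placeholder RPC endpoint
});

// A read-only call: simulation alone returns the value, so no signing is needed.
const tx = await client.hello({ to: "World" });
console.log(tx.result); // e.g. ["Hello", "World"]
```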
diff --git a/meeting-notes/2024-09-26.mdx b/meeting-notes/2024-09-26.mdx
deleted file mode 100644
index dc009069ad..0000000000
--- a/meeting-notes/2024-09-26.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
----
-title: "2024-09-26"
-authors: anataliocs
-tags: [developer]
----
-
-Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1288890126038208532)
-
-Summary: Hoops Finance, a DeFi protocol, discussed the platform they are building. https://www.hoops.finance/
-
-1. They abstract away the complexity of DeFi investments for normal users through a series of guided prompts.
-2. Provides simplified access to an LP liquidity provisioning abstraction
-3. Public AMM API for read/write data on AMMs on Stellar
-4. Hoops Finance API: https://api.v1.xlm.services/#overview
diff --git a/meeting-notes/2024-10-24.mdx b/meeting-notes/2024-10-24.mdx
deleted file mode 100644
index 4190a737b9..0000000000
--- a/meeting-notes/2024-10-24.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: "2024-10-24"
-authors: carstenjacobsen
-tags: [developer]
----
-
-Agenda: [Discord thread](https://discord.com/channels/897514728459468821/900374272751591424/1298362698123182080)
-
-SDF’s Platform team had an internal hackathon last week, with the purpose of building applications utilizing the [Composable Data Platform (CDP)](https://stellar.org/blog/developers/composable-data-platform). In this week’s developer meeting some of the team members present their projects. The apps are widely varied (trading app, fraud detection, JS browser-based ingester, etc.) but the intent here is to show how easy CDP is to use.
-
-Projects presented in the meeting:
-
-1. Trade Aggregations Service
-2. Deceptiscan - CDP fraud detection
-3. Composable Data Platform Hackies (data indexer, payment indexer, contract expiration alerter, data indexes in DuckDB, torrents for data indexes and ledger metadata)
-4. Data ingest with frontend JS
-5. Real-time analytics
diff --git a/meeting-notes/2024-11-14.mdx b/meeting-notes/2024-11-14.mdx
deleted file mode 100644
index 60b02b3e5d..0000000000
--- a/meeting-notes/2024-11-14.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: "2024-11-14"
-authors: carstenjacobsen
-tags: [developer]
----
-
-Agenda: [Discord thread](https://discord.com/events/897514728459468821/1304859059425382553/1306725344870400000)
-
-At this week’s developer meeting, Jeesun demonstrated the new Stellar Lab, showcasing its enhanced features designed to improve the developer experience.
-
-The tech stack of the new Stellar Lab was discussed in the meeting, and the following demos were used to show the Lab's functionality:
-
-1. Enable MultiSig Exercise
-2. Stellar Wallets Kit
-3. Create & Fund Account
-4. Save KeyPairs Feature
-5. Save Transactions Feature
-6. XDR to JSON mapping
-7. RPC Methods - including simulateTransaction
diff --git a/meeting-notes/2024-12-05.mdx b/meeting-notes/2024-12-05.mdx
deleted file mode 100644
index ecebf39dde..0000000000
--- a/meeting-notes/2024-12-05.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: "2024-12-05"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-In this week's call we talk to Alberto Chaves from Trustless Work about building an escrow service on Stellar, followed by a quick introduction to using AI-based tools to develop Soroban smart contracts.
diff --git a/meeting-notes/2024-12-12.mdx b/meeting-notes/2024-12-12.mdx
deleted file mode 100644
index 2fb974e446..0000000000
--- a/meeting-notes/2024-12-12.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: "2024-12-12"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-In this meeting, SDF Developer Advocate Chris Anatalio talks about the future stack, including smart contract wallets and Passkeys. In the second half, SDF Core Software Engineer Siddharth Suresh talks about increasing Soroban limits.
diff --git a/meeting-notes/2024-12-19.mdx b/meeting-notes/2024-12-19.mdx
deleted file mode 100644
index 8f9c4a3965..0000000000
--- a/meeting-notes/2024-12-19.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: "2024-12-19"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-In this meeting SDF Core Software Engineer Jay talks about BLS building blocks, and walks through examples and demos.
diff --git a/meeting-notes/2025-01-16.mdx b/meeting-notes/2025-01-16.mdx
deleted file mode 100644
index 0527400b44..0000000000
--- a/meeting-notes/2025-01-16.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: "2025-01-16"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-Stellar Development Foundation's Ecosystem DevRel team got together to talk about the past year, and what 2025 brings.
diff --git a/meeting-notes/2025-01-23.mdx b/meeting-notes/2025-01-23.mdx
deleted file mode 100644
index ae36d5a326..0000000000
--- a/meeting-notes/2025-01-23.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: "2025-01-23"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-Part 1
-
-In this week's meeting Hoops Finance's founders Bastian and Tim talk about the progress they have made, some design considerations and the future of Hoops Finance.
-
-Part 2
-
-Recording from a protocol meeting where two Core Advancement Proposals - CAP-0062 (Soroban Live State Prioritization) and CAP-0066 (Soroban In-memory Read Resource) - were discussed.
-
-Here are some resources to read up on:
-
-CAP-0062 - https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md
-Discussion - https://github.com/stellar/stellar-protocol/discussions/1575
-
-CAP-0066 - https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md
-Discussion - https://github.com/stellar/stellar-protocol/discussions/1585
diff --git a/meeting-notes/2025-01-30.mdx b/meeting-notes/2025-01-30.mdx
deleted file mode 100644
index c7200899ba..0000000000
--- a/meeting-notes/2025-01-30.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: "2025-01-30"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-Part 1
-
-RampMeDaddy will be joining us for this Stellar Developer Meeting. RampMeDaddy is currently participating in the Stellar x Draper Embark Program, and we ask what the team is up to, learn more about what they are building, and have a casual chat about how they are building their dapp on Stellar/Soroban.
-
-Visit their website here: https://rampmedaddy.com
-
-Part 2
-
-In this protocol meeting two Core Advancement Proposals are discussed - Dima will be presenting CAP-0064 (Memo Authorization for Soroban), and Graydon will be presenting CAP-0065 (Reusable Module Cache).
-
-Here are some resources:
-
-CAP-0064 - https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md
-Discussion - https://github.com/stellar/stellar-protocol/discussions/1610
-
-CAP-0065 - https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md
-Discussion - https://github.com/stellar/stellar-protocol/discussions/1615
diff --git a/meeting-notes/2025-02-06.mdx b/meeting-notes/2025-02-06.mdx
deleted file mode 100644
index 5b3d96daa7..0000000000
--- a/meeting-notes/2025-02-06.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
----
-title: "2025-02-06"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-## Part 1
-
-The dev team behind Reflector and Refractor presents their tools for decentralized finance and transaction management on Stellar. Reflector delivers on-chain price feeds to enhance DeFi protocols, while Refractor provides pending transaction storage and a multisig aggregator for coordinated signing. They’ll cover how these products support trustless financial interactions and improve smart contract execution on Stellar.
-
-Visit their website here: https://reflector.network
-
-## Part 2
-
-In this protocol meeting we discuss two Core Advancement Proposals - Dima will be presenting CAP-0063 (Parallelism-friendly Transaction Scheduling), and Siddharth will be presenting CAP-0067 (Unified Asset Events).
-
-Here are some resources to read up on:
-
-- [CAP-0063](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0063.md)
-
-- [CAP-0063 Discussion](https://github.com/stellar/stellar-protocol/discussions/1602)
-
-- [CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md)
-
-- [CAP-0067 Discussion](https://github.com/stellar/stellar-protocol/discussions/1553)
diff --git a/meeting-notes/2025-02-13.mdx b/meeting-notes/2025-02-13.mdx
deleted file mode 100644
index 7139876294..0000000000
--- a/meeting-notes/2025-02-13.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: "2025-02-13"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-## Part 1
-
-OrbitLens will showcase the StellarBroker and StellarExpert services. StellarBroker is a multi-source liquidity swap router that aggregates liquidity from Soroban AMMs, Classic AMMs, and Classic orderbooks. The second part of the presentation will focus on Stellar smart contract insights and on-chain transaction analytics using the StellarExpert blockchain explorer.
-
-Links:
-
-- [StellarBroker](https://stellar.broker)
-
-- [StellarExpert](https://stellar.expert)
-
-## Part 2
-
-In this protocol meeting we discuss two Core Advancement Proposals - Dima is presenting CAP-0068 and CAP-0069, both of which add new host functions.
-
-Here are some resources to read up on:
-
-- [CAP-0068](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0068.md)
-
-- [CAP-0068 Discussion](https://github.com/stellar/stellar-protocol/discussions/1626)
-
-- [CAP-0069](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0069.md)
-
-- [CAP-0069 Discussion](https://github.com/stellar/stellar-protocol/discussions/1633)
diff --git a/meeting-notes/2025-02-20.mdx b/meeting-notes/2025-02-20.mdx
deleted file mode 100644
index 5467d4a49a..0000000000
--- a/meeting-notes/2025-02-20.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: "2025-02-20"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-## Part 1
-
-SDF Developer Advocate Elliot Voris gives an introduction to the KaleFail project, and covers how it was built, its purpose, and future features. KaleFail is building on top of Kalepail’s KALE project and both serve as fully fledged demo applications, showcasing key Stellar/Soroban features.
-
-Links:
-
-- [KaleFail](https://kalefail.elliotfriend.com)
-
-- [Source Code](https://github.com/elliotfriend/project-kalefail)
-
-- [KALE](https://kalepail.com/kale)
-
-## Part 2
-
-In this protocol meeting, a Core Advancement Proposal is discussed - Siddharth Suresh will be presenting the memo-related updates to CAP-0067.
-
-Here are some resources to read up on:
-
-- [CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md)
-
-- [Discussion](https://github.com/stellar/stellar-protocol/discussions/1553)
diff --git a/meeting-notes/2025-02-27.mdx b/meeting-notes/2025-02-27.mdx
deleted file mode 100644
index bdc56a428e..0000000000
--- a/meeting-notes/2025-02-27.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: "2025-02-27"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-## Part 1
-
-Blend, a universal liquidity protocol built on Stellar/Soroban, is preparing to release V2 and launched the Blend V2 Audit + Certora Formal Verification competition this past Monday. There are $125K in USDC up for grabs, and the team is joining this week’s meeting to talk about the competition and walk through some code. Join to learn more about Blend and the competition!
-
-Links:
-
-- [Blend](https://www.blend.capital)
-
-- [Competition](https://code4rena.com/audits/2025-02-blend-v2-audit-certora-formal-verification)
-
-## Part 2
-
-In this protocol meeting Dima's and Leigh's prototypes for dealing with memos/muxed accounts in CAP-0067 are discussed, as well as updates to CAP-0066 (Soroban In-memory Read Resource).
-
-Links:
-
-- [CAP-0066](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md)
-
-- [CAP-0066 Discussion](https://github.com/stellar/stellar-protocol/discussions/1585)
-
-- [CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md)
-
-- [CAP-0067 Discussions](https://github.com/stellar/stellar-protocol/discussions/1553#discussioncomment-12306846) and [CAP-0067 Discussions](https://github.com/stellar/stellar-protocol/discussions/1553#discussioncomment-12309408)
diff --git a/meeting-notes/2025-03-06.mdx b/meeting-notes/2025-03-06.mdx
deleted file mode 100644
index 11e3d02600..0000000000
--- a/meeting-notes/2025-03-06.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: "2025-03-06"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-In this meeting we have a conversation with Christian Santagata from OpenZeppelin about the Stellar x OpenZeppelin multi-year partnership, and talk about what the team is working on in the Stellar ecosystem. The conversation covers Stellar token contracts and token standards, and how to use the OpenZeppelin Contract Wizard that now supports Stellar contracts.
-
-Links:
-
-- [OpenZeppelin Contract Wizard](https://wizard.openzeppelin.com/stellar)
-
-- [OpenZeppelin Stellar Contracts](https://github.com/OpenZeppelin/stellar-contracts)
-
-- [Dev Walkthrough](https://www.youtube.com/watch?v=iD7ZspsZLVo)
diff --git a/meeting-notes/2025-03-27.mdx b/meeting-notes/2025-03-27.mdx
deleted file mode 100644
index 0cb6010bfd..0000000000
--- a/meeting-notes/2025-03-27.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: "2025-03-27"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-## Part 1
-
-We are meeting with the OpenZeppelin team again to catch up on the latest feature additions, and this time we have invited two of their engineers, Boyan Barakov and Ozgun Ozerk, to answer technical questions from the audience.
-
-Links:
-
-- [OpenZeppelin Contract Wizard](https://wizard.openzeppelin.com/stellar)
-
-- [OpenZeppelin Stellar Contracts](https://github.com/OpenZeppelin/stellar-contracts)
-
-- [Dev Walkthrough](https://www.youtube.com/watch?v=iD7ZspsZLVo)
-
-## Part 2
-
-This is a short protocol meeting, following up on previous discussions.
diff --git a/meeting-notes/2025-04-03.mdx b/meeting-notes/2025-04-03.mdx
deleted file mode 100644
index a02b752bc0..0000000000
--- a/meeting-notes/2025-04-03.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: "2025-04-03"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-First we take a look at Stellar Lab; a lot of features and improvements have been added since we did a Stellar Lab presentation last year. Stellar Lab is a powerful tool for everyone developing on Stellar.
-
-We are looking for community feedback on the Upgradeable Contracts SEP, and will present the details of this proposal.
-
-Links:
-
-- [Stellar Lab](https://lab.stellar.org)
-
-- [Upgradeable Contracts SEP](https://github.com/stellar/stellar-protocol/pull/1671)
diff --git a/meeting-notes/2025-04-10.mdx b/meeting-notes/2025-04-10.mdx
deleted file mode 100644
index b9d80dd67a..0000000000
--- a/meeting-notes/2025-04-10.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: "2025-04-10"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-Pamphile (better known as tupui) is a senior software engineer working at Bitpanda. Previously he worked on the team that brought Flight Simulator 2020, and he created a consulting company to work on open source software.
-
-Now he is building his project named Tansu on the Stellar blockchain. Tansu is a decentralized system which aims to secure the software supply chain and break siloed communities by using a DAO.
-
-Link:
-
-- [Tansu](https://tansu.dev)
diff --git a/meeting-notes/2025-04-17.mdx b/meeting-notes/2025-04-17.mdx
deleted file mode 100644
index 93ac66c591..0000000000
--- a/meeting-notes/2025-04-17.mdx
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: "2025-04-17"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-This week we’ll dive into one of the most overlooked tools - Quickstart.
-
-Quickstart is a local Stellar network environment (node) that allows developers to run a local version of the Stellar network for development and testing.
-
-Learn about Quickstart, see how to install it and how to deploy the Hello World sample dapp on Quickstart.
-
-Link:
-
-- [Stellar Quickstart](/docs/tools/quickstart)
diff --git a/meeting-notes/2025-05-01.mdx b/meeting-notes/2025-05-01.mdx
deleted file mode 100644
index 548b2fd11e..0000000000
--- a/meeting-notes/2025-05-01.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: "2025-05-01"
-authors: carstenjacobsen
-tags: [developer]
----
-
-import YouTube from "@site/src/components/YouTube";
-
-We are close to completing the implementation of Whisk, the Protocol 23 CAPs; the next step in the CAP lifecycle is voting by the Core CAP team. In this protocol meeting an update was provided on the CAPs and any final questions from the community were answered.
-
-Read up on the CAPs here:
-
-[CAP-0062](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) - [Discussion](https://github.com/orgs/stellar/discussions/1575)
-
-[CAP-0063](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0063.md) - [Discussion](https://github.com/orgs/stellar/discussions/1602)
-
-[CAP-0065](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md) - [Discussion](https://github.com/orgs/stellar/discussions/1615)
-
-[CAP-0066](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) - [Discussion](https://github.com/orgs/stellar/discussions/1585)
-
-[CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) - [Discussion](https://github.com/orgs/stellar/discussions/1553)
-
-[CAP-0068](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0068.md) - [Discussion](https://github.com/orgs/stellar/discussions/1626)
-
-[CAP-0069](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0069.md) - [Discussion](https://github.com/orgs/stellar/discussions/1633)
-
-[CAP-0070](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0070.md) - [Discussion](https://github.com/orgs/stellar/discussions/1719)
diff --git a/meeting-notes/2025-05-22.mdx b/meeting-notes/2025-05-22.mdx
deleted file mode 100644
index 1cb168aa01..0000000000
--- a/meeting-notes/2025-05-22.mdx
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: "2025-05-22"
-authors: carstenjacobsen
-tags: [developer]
----
-
-At this Stellar Developer Meeting we are having a chat with Bram Hoogenkamp from OpenZeppelin about the OpenZeppelin Monitor, which provides monitoring and alerting for smart contracts.
diff --git a/meeting-notes/2025-07-10.mdx b/meeting-notes/2025-07-10.mdx
deleted file mode 100644
index e50f55704f..0000000000
--- a/meeting-notes/2025-07-10.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
----
-title: "2025-07-10"
-authors: carstenjacobsen
-tags: [developer]
----
-
-In this call we are getting an update on the exciting Scaffold Stellar project by Fifo from Aha Labs.
-
-Scaffold Stellar is a great way to fast-track new projects built on Stellar. It has starter smart contracts, a frontend, plugins, and tooling built in.
-
-[Scaffold Stellar Repo](https://github.com/theahaco/scaffold-stellar)
diff --git a/meeting-notes/2025-07-17.mdx b/meeting-notes/2025-07-17.mdx
deleted file mode 100644
index 1c66005564..0000000000
--- a/meeting-notes/2025-07-17.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
----
-title: "2025-07-17"
-authors: carstenjacobsen
-tags: [developer]
----
-
-In this meeting the Nomyx team will give us an introduction to their advanced smart contract architecture (an implementation of the Diamond Pattern) and the Nomyx Diamond proxy viewer interface, which facilitates inspection of deployed diamonds and makes them a bit easier to work with.
-
-They will also be showcasing a live deployment instance of the diamond proxy standard currently used for the Nomyx platform, which removes limitations related to the upper contract size limit, allowing developers to launch complex products.
-
-[Nomyx Website](https://www.nomyx.io)
diff --git a/meeting-notes/2025-07-24.mdx b/meeting-notes/2025-07-24.mdx
deleted file mode 100644
index f7ee6cd4a4..0000000000
--- a/meeting-notes/2025-07-24.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: "2025-07-24"
-authors: carstenjacobsen
-tags: [developer]
----
-
-In this meeting we are talking to Esteban and Francisco from PaltaLabs. Stellar Hacks is currently running a hackathon in collaboration with PaltaLabs, where the goal is to build using their projects Soroswap and DeFindex.
-
-Let's see how the hackathon is going and hear some advice if you are still considering entering; there's still time to enter!
-
-Links:
-
-- [Hackathon](https://dorahacks.io/hackathon/stellar-hacks-paltalabs/detail)
-- [Soroswap](https://docs.soroswap.finance)
-- [DeFindex](https://docs.defindex.io)
diff --git a/meeting-notes/2025-08-07.mdx b/meeting-notes/2025-08-07.mdx
deleted file mode 100644
index ac3cf30ef5..0000000000
--- a/meeting-notes/2025-08-07.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
----
-title: "2025-08-07"
-authors: carstenjacobsen
-tags: [developer]
----
-
-In this meeting we talk to Flashback founder Brieuc Berruet, who participated in the DraperU x Stellar incubator program last year, to get an update on his project (spoiler alert - it's live!).
-
-Learn more about the project, and see a cool demo!
-
-[Flashback Website](https://www.flashback.tech)
diff --git a/meeting-notes/2025-09-25.mdx b/meeting-notes/2025-09-25.mdx
deleted file mode 100644
index aaafdce411..0000000000
--- a/meeting-notes/2025-09-25.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: "2025-09-25"
-authors: carstenjacobsen
-tags: [developer]
----
-
-In this week’s Stellar Developer Meeting we discuss new Core Advancement Proposals.
-
-Here are some resources to read up on:
-
-[CAP-0071](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0071.md) - [Discussion](https://github.com/orgs/stellar/discussions/1784)
-
-[CAP-0072](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md) - [Discussion](https://github.com/orgs/stellar/discussions/1763)
-
-[CAP-0073](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0073.md) - [Discussion](https://github.com/orgs/stellar/discussions/1668)
diff --git a/meeting-notes/2025-10-02.mdx b/meeting-notes/2025-10-02.mdx
deleted file mode 100644
index 32577fac55..0000000000
--- a/meeting-notes/2025-10-02.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: "2025-10-02"
-authors: carstenjacobsen
-tags: [developer]
----
-
-In this meeting we will discuss CAP-74, BN254, and Poseidon hash functions.
-
-[CAP-0074](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0074.md) - [Discussion](https://github.com/orgs/stellar/discussions/1780)
diff --git a/meeting-notes/2025-10-09.mdx b/meeting-notes/2025-10-09.mdx
deleted file mode 100644
index bfede2756a..0000000000
--- a/meeting-notes/2025-10-09.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: "2025-10-09"
-authors: carstenjacobsen
-tags: [developer]
----
-
-## OpenZeppelin UI Builder demo
-
-We are meeting with the OpenZeppelin team again to catch up on the latest developer tooling work they are doing.
-
-This time Steve will demo UI Builder, an easy way to spin up a front-end for any contract call in seconds. UI Builder allows you to select a function, and it then auto-generates a React UI with wallet connect and multi-network support, and exports a complete app.
-
-[OpenZeppelin UI Builder](https://builder.openzeppelin.com)
-
-## Protocol Discussion
-
-In this Core Advancement Proposal discussion, CAP-0075 (Host functions for enabling Poseidon and Poseidon2 hash functions) is presented.
-
-This CAP proposes adding host functions for cryptographic primitives enabling the Poseidon family of hash functions, which are widely adopted hash choices in efficient zero-knowledge proof systems. Supporting these as host functions in Soroban can facilitate adoption of ZK applications and interoperability with other ecosystems.
-
-Link to [CAP-0075](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0075.md)
diff --git a/meeting-notes/2025-10-16.mdx b/meeting-notes/2025-10-16.mdx
deleted file mode 100644
index 7e9032cc0a..0000000000
--- a/meeting-notes/2025-10-16.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: "2025-10-16"
-authors: carstenjacobsen
-tags: [developer]
----
-
-## Protocol Discussion
-
-In this call the recent state archival issue, introduced by Whisk (Protocol 23), is discussed. Questions from community builders are also answered.
diff --git a/meeting-notes/2025-10-23.mdx b/meeting-notes/2025-10-23.mdx
deleted file mode 100644
index 9eeb1c6959..0000000000
--- a/meeting-notes/2025-10-23.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: "2025-10-23"
-authors: carstenjacobsen
-tags: [developer]
----
-
-In this meeting we are talking to Dobprotocol to learn more about how they use Stellar to build a unique platform, turning machines into investable assets.
-
-Website: https://www.dobprotocol.com
diff --git a/meeting-notes/2025-10-30.mdx b/meeting-notes/2025-10-30.mdx
deleted file mode 100644
index 1893aab9e5..0000000000
--- a/meeting-notes/2025-10-30.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: "2025-10-30"
-authors: carstenjacobsen
-tags: [developer]
----
-
-In this meeting we are continuing our mini-series about the Stellar-based open source tooling OpenZeppelin is developing. In the last session we talked briefly about Relayer, and this time we are diving deeper into this tool and the OpenZeppelin Managed Service.
-
-OpenZeppelin Relayer: https://docs.openzeppelin.com/relayer
diff --git a/meeting-notes/2025-11-06.mdx b/meeting-notes/2025-11-06.mdx
deleted file mode 100644
index 54058a45ae..0000000000
--- a/meeting-notes/2025-11-06.mdx
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: "2025-11-06"
-authors: carstenjacobsen
-tags: [developer]
----
-
-In this meeting we do a walkthrough of OpenZeppelin’s Q3 library releases for Smart Account, Vault and RWA.
-
-Links to docs:
-
-- Smart Account: https://docs.openzeppelin.com/stellar-contracts/accounts/smart-account
-- Vault: https://docs.openzeppelin.com/stellar-contracts/tokens/vault/vault
-- RWA: https://docs.openzeppelin.com/stellar-contracts/tokens/rwa/rwa
diff --git a/meeting-notes/authors.yml b/meeting-notes/authors.yml
deleted file mode 100644
index afc4ce29b7..0000000000
--- a/meeting-notes/authors.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-carstenjacobsen:
-  name: Carsten Jacobsen
-  title: Senior Developer Advocate
-  url: https://github.com/carstenjacobsen
-  image_url: https://github.com/carstenjacobsen.png
-  page: true
-  socials:
-    github: carstenjacobsen
-    x: CarstenJacobsen
-    linkedin: carstenjacobsendk
-elliotfriend:
-  name: Elliot Voris
-  title: Senior Developer Advocate
-  url: https://github.com/ElliotFriend
-  image_url: https://github.com/ElliotFriend.png
-  page: true
-  socials:
-    github: ElliotFriend
-    x: ElliotFriend
-    linkedin: elliotfriend
-kalepail:
-  name: Tyler van der Hoeven
-  title: Developer Advocate Director
-  url: https://github.com/kalepail
-  image_url: https://github.com/kalepail.png
-  page: true
-  socials:
-    github: kalepail
-    x: kalepail
-    linkedin: tyvdh
-anataliocs:
-  name: Chris Anatalio
-  title: Senior Developer Advocate
-  url: https://github.com/anataliocs
-  image_url: https://github.com/anataliocs.png
-  socials:
-    github: anataliocs
-naman:
-  name: Naman Kumar
-  title: Product Manager
-  url: https://github.com/namankumar
-  image_url: https://github.com/namankumar.png
-  socials:
-    github: namankumar
-julian:
-  name: Julian Martinez
-  title: Senior Developer Advocate
-  url: https://github.com/Julian-dev28
-  image_url: https://github.com/Julian-dev28.png
-  socials:
-    github: Julian-dev28
diff --git a/meetings/2019-01-24.mdx b/meetings/2019-01-24.mdx
new file mode 100644
index 0000000000..53d1133f4d
--- /dev/null
+++ b/meetings/2019-01-24.mdx
@@ -0,0 +1,66 @@
+---
+title: "Protocol Process and Trustline Proposals"
+description: "This overview highlights anchor services and network fees and resource limits."
+authors: johnny-goodnow
+tags:
+  - legacy
+  - CAP-10
+  - CAP-13
+  - CAP-15
+  - CAP-5
+  - CAP-7
+  - CAP-8
+  - SEP-13
+  - SEP-6
+  - SEP-8
+---
+
+## [Public Discussion](https://groups.google.com/g/stellar-dev/c/x0Wrj2kuQ0U/m/CrId5QkoFwAJ)
+
+### Agenda
+
+- [CAP-0005](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0005.md) & [CAP-0006](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0006.md) - moving to approval.
+- [Cosigned Assets Draft](https://github.com/stellar/stellar-protocol/issues/146) - in draft since September; current approach considered viable.
+- [CAP-0007 Revised Proposal](https://github.com/stellar/stellar-protocol/blob/master/drafts/draft-detacct.md).
+- [CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md) - Bump Fee Transactions update.
+- CAP-0013 vs SEP-0013 (CAP-0007 + CAP-0008) - Change Trustlines to Balances.
+
+### Meeting Notes
+
+- CAP-0006 progressing with specific messaging for a targeted use case.
+- CAP-0005 still lacks replace-by-fee and remains problematic.
+- Deterministic accounts & creatorTxID (David):
+  - Vastly simplified since prior iteration.
+  - Salt mechanism needed; likely a full hash to prevent protocol rewind attacks.
+- Cosigned Assets:
+  - Related to [SEP-8 (Regulated Assets)](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0008.md).
+  - NFT discussions considered orthogonal; may warrant a separate CAP.
+  - Catalog of no-op operations discussed (see the sketch after these notes):
+    - Payment to oneself
+    - Bump op 0
+    - SetOptions with no fields
+
+### Follow-up Actions
+
+- Johnny: coordinate CAP-0006 community and API updates.
+- David: expand deterministic accounts draft and address CAP-0010/CAP-0015 implications.
+- Orbit: consolidate NFT discussions and assess need for a separate CAP.
+- Jeremy: review SEP-13A comments and address shortcomings in SEP-13.
+
+### Trustline follow-up
+
+The adjacent trustline-focused discussion reprised the agenda above and surfaced additional nuance around asset onboarding and NFT considerations. Tom and Orbit asked the group to clarify how cosigned assets should handle no-op operations, and David emphasized that NFT work is orthogonal but still worth cataloging.
+
+- Tom asked for clarity on SEP-13A’s handling of edge cases, and Orbit volunteered to gather NFT-related requirements. The session reviewed the same high-level trustline-use case summary document in [Google Docs](https://docs.google.com/document/d/1_3KcBTbF7Diu_wu-ArvajYhEcPoDp9SgK5-dNruOK3c/edit#heading=h.8j5x6v50ft4p). That document lays out the anchor requirements around SEP-6/SEP-13 deposits and transfers so wallets can set up user-facing flows that never require extra trustlines/XLM or manual follow-up steps, while also supporting sends to accounts that don’t yet exist.
+  - Deposits must complete once the anchor receives the external asset, delivering funds to the user’s original account whether or not it has a trustline or even exists yet.
+  - Users shouldn’t need to add additional XLM to finish a deposit, nor should anchors wait on any extra manual action once the deposit is underway.
+  - The send requirements emphasize end-to-end usability: sends must arrive without the recipient doing any setup, and the document recommends keeping implementations as simple as possible to encourage adoption.
+  - The group cataloged known no-ops (payment to self, bump op 0, SetOptions with no fields) and challenged attendees to bring simple, actionable alternatives rather than more complexity.
+  - A short-term TODO list reiterated that deterministic accounts need a salt/hash to guard against rewind attacks, while CAP-0006 updates and SEP-13A comments should stay in sync with the evolving API design.
+
+### Trustline follow-up actions
+
+- Johnny: continue improving the DEX-focused messaging tied to CAP-0006 and sync with Jon on API revisions.
+- David: add concrete deterministic-account examples, harden salt handling, and spell out whether those accounts supplant CAP-0010/CAP-0015 work.
+- Orbit: collect NFT and Asset-object feedback to determine whether a new CAP is warranted.
+- Jeremy: respond to Tom’s SEP-13A comments and close the remaining gaps in the proposal.
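To make the cataloged no-ops concrete, here is a sketch that builds all three with the modern JavaScript SDK (anachronistic to this 2019 discussion, and the key and sequence number are placeholders). Each operation is valid and consumes a fee, but leaves ledger state otherwise unchanged:

```typescript
import { Account, Asset, BASE_FEE, Keypair, Networks, Operation, TransactionBuilder } from "@stellar/stellar-sdk";

const kp = Keypair.random();
const source = new Account(kp.publicKey(), "1"); // placeholder sequence number

const tx = new TransactionBuilder(source, { fee: BASE_FEE, networkPassphrase: Networks.TESTNET })
  // Payment to oneself: the balance is unchanged apart from the fee.
  .addOperation(Operation.payment({ destination: kp.publicKey(), asset: Asset.native(), amount: "1" }))
  // Bumping to a sequence number at or below the current one changes nothing.
  .addOperation(Operation.bumpSequence({ bumpTo: "0" }))
  // SetOptions with no fields set modifies no account settings.
  .addOperation(Operation.setOptions({}))
  .setTimeout(300)
  .build();
```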
diff --git a/meetings/2019-03-07.mdx b/meetings/2019-03-07.mdx
new file mode 100644
index 0000000000..8745e9082f
--- /dev/null
+++ b/meetings/2019-03-07.mdx
@@ -0,0 +1,107 @@
+---
+title: "CAP Process and Authorization Updates"
+description: "This overview highlights network fees and resource limits."
+authors: johnny-goodnow
+tags:
+  - legacy
+  - CAP-10
+  - CAP-11
+  - CAP-13
+  - CAP-14
+  - CAP-15
+  - CAP-16
+  - CAP-17
+  - CAP-18
+  - CAP-7
+  - CAP-8
+  - CAP-9
+  - SEP-13
+---
+
+## [Public Discussion](https://groups.google.com/g/stellar-dev/c/NJl_loV_RXU/m/SarzWEJPAgAJ)
+
+### Announcements
+
+- **[Merging in CAP/SEP Process](https://github.com/stellar/stellar-protocol/pull/247)**
+  - Another round of edits completed; further feedback requested on the PR.
+  - Intent to merge immediately, formalizing the previously ad-hoc CAP/SEP process.
+  - All CAPs will be numbered upon acceptance as drafts; competing proposals will be explicitly rejected.
+  - Post-merge cleanup planned for stale PRs and issues, with authors notified of next steps.
+- **Establishing focus given limited developer resources**
+  - Fee structure
+  - Payment channels
+  - Payment network (**Starlight**)
+
+### Final Comment Period - Acceptance
+
+- [CAP-0017](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0017.md) - Update `LastModifiedLedgerSeq` If and Only If LedgerEntry Is Modified (Jonathan Jove)
+- [CAP-0018](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md) - Fine-Grained Control of Authorization (Jonathan Jove)
+- Address offer mutability (Jon)
+- CAP-0016 to be rejected contingent on CAP-0018 approval
+
+### Priorities
+
+1. Replace Min Fee Mechanism
+2. Trustline (& other) Usability
+   - Explicit intent to revive [CAP-0013 Change Trustlines to Balances](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0013.md) (Dan Robinson).
+3. Payment Channels
+   - Focus on core components required for a basic payment channel design.
+4. Starlight
+   - Deferred pending availability of dedicated development resources.
+
+### Deferred Items
+
+- [CAP-0007](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0007.md) - Deterministic Account Creation (Jeremy Rubin)
+- [CAP-0008](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0008.md) - Self-Identified Pre-Auth Transaction (Jeremy Rubin)
+- [CAP-0009](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0009.md) - Linear / Exterior Immutable Accounts (Jeremy Rubin)
+- [CAP-0011](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0011.md) - Relative Account Freeze (Jeremy Rubin)
+- Draft: Deterministic accounts and creatorTxID (David Mazières)
+- [CAP-0014](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0014.md) - Adversarial Transaction Set Ordering (Jeremy Rubin)
+
+### Agenda
+
+- Final Comment proposals - new unresolved concerns (30 min max)
+- CAP-0017, CAP-0018
+- Address offer mutability
+- Replace Min Fee (60 min max)
+
+### Replace Min Fee Discussion
+
+- Diverging views:
+  - David and Nico prefer a mechanism _before_ raising the min fee.
+  - Jed favors pushing fee increases forward immediately.
+- Current proposals:
+  - [CAP-0010](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0010.md) - Fee Bump Account (Jeremy Rubin)
+  - [CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md) - Bump Fee Transactions (OrbitLens)
+  - Pre-draft: feeSource and feeMultiplier (David Mazières)
+- [Reference spreadsheet](https://docs.google.com/spreadsheets/d/1_u0LE61V-jkXUcNBZW5xhXZEmIz5rQSgZEr8VsVl47o/edit) (OrbitLens) compares fee-sponsorship approaches (CAP-10 fee balances, feeSource/feeMultiplier, deterministic accounts, CAP-15 metadata) across tradeoffs like explicit sponsor control, pre-signed tx support, and algorithmic complexity.
+  - CAP-10 introduces dedicated fee balances so sponsors can preload fees, but it requires new operations and careful management of locked fee funds.
+  - FeeSource/feeMultiplier and deterministic accounts keep fees on existing transactions but push complexity to signers (pick fee source in advance, ensure simultaneous submissions) with limited control.
+  - CAP-15 embeds fee bumping in the transaction envelope, letting sponsors declare amounts per envelope, while the spreadsheet emphasizes choosing the right tradeoff between sponsor control and implementation simplicity.
+- Outcomes:
+  - David to draft a new CAP superseding CAP-0015, including Transaction versioning.
+  - Johnny to follow up with David regarding feeSource draft status.
+
+### Trustline Usability
+
+- Discussion on next steps for CAP-0013 Change Trustlines to Balances
+- Open questions:
+  - What changes would improve group consensus?
+  - Are there unpresented fundamental ideas?
+  - Who should own the proposal going forward?
+- Related discussion: **Sender Pays** (sending assets without trustlines).
+- Consensus points:
+  - CAP-0013 enables adding balances to others and simplifies token distribution.
+  - SEP-0013 approach explicitly rejected.
+  - Hard requirements need definition; Tom Q. designated owner.
+  - Developer-friendliness and Account ID semantics must be addressed.
+
+### Meeting Notes
+
+- Inline notes (highlighted in blue in original thread) captured above.
+
+### Follow-Up Actions
+
+- **Jon Jove** to update CAPs based on email list feedback.
+- **Tom Quisel** to revise and present hard requirements for trustline usability.
+- **David** to submit a new proposal for TransactionEnvelope versioning to enable changes like CAP-0015.
diff --git a/meetings/2019-03-14.mdx b/meetings/2019-03-14.mdx
new file mode 100644
index 0000000000..8a4caf3d82
--- /dev/null
+++ b/meetings/2019-03-14.mdx
@@ -0,0 +1,59 @@
+---
+title: "Trustline Usability and Authorization Changes"
+description: "This overview highlights anchor services and network fees and resource limits."
+authors: johnny-goodnow
+tags:
+  - legacy
+  - CAP-13
+  - CAP-15
+  - CAP-17
+  - CAP-18
+  - CAP-19
+  - SEP-13
+  - SEP-16
+  - SEP-17
+  - SEP-6
+---
+
+## [Public Discussion](https://groups.google.com/g/stellar-dev/c/uxmo4LM5FWY/m/psazVteTBgAJ)
+
+### Announcements
+
+- Jon made additional changes to [CAP-0018](https://github.com/stellar/stellar-protocol/pull/263); with no further comments, it will move out of FCP next week.
+- [CAP-0017](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0017.md) moved to **Approved**.
+
+### Agenda
+
+#### Trustline Usability
+
+- Restated [requirements document](https://docs.google.com/document/d/1_3KcBTbF7Diu_wu-ArvajYhEcPoDp9SgK5-dNruOK3c/edit#heading=h.8j5x6v50ft4p) up for discussion. The doc captures the desired SEP-6/SEP-13 deposit and send flows: anchors should auto-complete deposits without additional user actions, deliver funds to the intended account even when it has no trustline or XLM, and allow sends to non-existent accounts by crediting the original account ID.
+  - Deposits must finish once the anchor receives the external asset, never forcing the user to add trustlines or extra XLM, even when accounts are uncreated.
+  - Sending should work regardless of the recipient’s trustlines or account state, and funds should arrive at the original Stellar account without extra steps.
+  - The document stresses usability: anchors/wallets should avoid complex workflows, so any solution must stay simple, fast, and easy to implement.
+  - [SEP-0017](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0017.md): Deposit & transfer of assets using [CAP-0013](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0013.md)
+  - [SEP-0016](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0016.md): Deposit & transfer of assets using deterministic accounts
+  - It is not possible to satisfy all requirements in the protocol's current form.
+
+#### Issues with [CAP-0013](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0013.md)
+
+- If base reserve increases, malicious actors could add large numbers of trustlines to an account.
+- Accounts can be DoS'd via the subentry limit.
+- The proposal has very large scope, affecting most operations and assumptions in the protocol.
+
+#### Alternatives Discussed
+
+- Use deterministic accounts and merge them if the receiver already has an account.
+- Introduce a **SentBalance** ledger entry claimable by the receiver.
+- Introduce a **remove trustline** operation that returns assets to the sender.
+
+#### Fee Mechanisms
+
+- David proposed [CAP-0019](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0019.md):
+  - Enables future-upgradability of `TransactionEnvelope`
+  - Allows a simpler implementation of [CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md)
+
+### Follow-up Actions
+
+- **David**: Write a short proposal on "embryonic accounts".
+- **Jeremy**: Clarify SEP-0016 workflow with deterministic accounts.
+- **Jon**: Provide analysis of attack surfaces and issues in [CAP-0013](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0013.md).
diff --git a/meetings/2019-03-21.mdx b/meetings/2019-03-21.mdx
new file mode 100644
index 0000000000..4846b61410
--- /dev/null
+++ b/meetings/2019-03-21.mdx
@@ -0,0 +1,33 @@
+---
+title: "Fee Updates and Order Book Ideas"
+description: "This overview highlights order book trading and network fees and resource limits."
+authors: johnny-goodnow
+tags:
+  - legacy
+  - CAP-15
+  - CAP-16
+  - CAP-18
+  - CAP-19
+---
+
+## [Public Discussion](https://groups.google.com/g/stellar-dev/c/KzgQc58510o/m/1qRSxo7GCAAJ)
+
+### Announcements
+
+- [CAP-0016](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0016.md) rejected in favor of [CAP-0018](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md) (now Accepted). CAP-0016 will only be revived if CAP-0018 encounters implementation issues.
+- [CAP-0018](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md) moved to Accepted.
+- [CAP-0019](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0019.md) moved to FCP: Acceptance.
+- Jed was in Singapore this week; trustline discussions were postponed until his return.
+
+### Agenda
+
+#### Fee Follow-ups
+
+- Topics: [CAP-0019](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0019.md), [CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md), and other fee-related suggestions.
+- Decision: Push CAP-0019 to FCP: Acceptance and reframe CAP-0015 in the context of CAP-0019.
+
+#### Future of the Order Book - Ideation
+
+- Broad discussion of possible directions for the order book.
+- No commitments were made.
+- Goal: Produce an exhaustive list of ideas, followed by prioritization and requirements definition in future sessions.
diff --git a/meetings/2019-03-28.mdx b/meetings/2019-03-28.mdx
new file mode 100644
index 0000000000..bd98da83d2
--- /dev/null
+++ b/meetings/2019-03-28.mdx
@@ -0,0 +1,38 @@
+---
+title: "Bump Fee and Multisig Submission"
+description: "This overview highlights network fees and resource limits."
+authors: johnny-goodnow
+tags:
+  - legacy
+  - CAP-15
+  - CAP-19
+  - CAP-20
+  - CAP-5
+  - SEP-19
+---
+
+## [Public Discussion](https://groups.google.com/g/stellar-dev/c/t162rVxLYB4/m/XB4_9st1BgAJ)
+
+### Announcements
+
+- [CAP-0019](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0019.md) (Future-upgradable TransactionEnvelope type) is moving to Accepted.
+- "Implementation Review" stage added for CAPs (see https://github.com/stellar/stellar-protocol/pull/279); keep in mind for future CAP process.
+- Jed out this week; trustline discussion pushed again.
+- [CAP-0020](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0020.md) implementation is mostly there.
+
+### Agenda
+
+- [CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md) (Bump Fee Extension): Modified to utilize [CAP-0019](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0019.md). Goal is to bring it to a vote, or enumerate what is needed to make it pass.
+  - David: Two transaction IDs (inner and outer) as a result of the change.
+  - Nico: Original feedback was that this relies on [CAP-0005](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0005.md) logic that no longer exists in the CAP-0005 implementation.
+  - Nico and David to discuss offline; David to submit additional edits and document constraints and issues from the meeting.
+- [SEP-0019](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0019.md) (Bootstrapping Multisig Transaction Submission):
+  - Significant discussion with contributions from Paul Selden, Antoine (MrTicot), Orbit, Johan, and others.
+  - Focus should remain on SEP-0019 and direction for any related SEPs.
+  - Can pull in Paul and others for the next meeting; it is likely the most active SEP currently.
+  - SEP-0019 is largely focused on the StellarGuard protocol and currently only describes basic flow properties and API endpoints; substantial work remains.
+  - Open questions:
+    - Metadata (storage and off-chain handling)
+    - Coordinator (no core changes required)
+    - Possible use of an IPFS home domain
+  - Orbit to send a document outlining design considerations for multisig transaction coordination.
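For readers mapping this discussion onto what eventually shipped: the CAP-0015 bump fee design is available today as fee-bump transactions. A minimal sketch with the modern JavaScript SDK (keys and sequence number are placeholders) shows a sponsor wrapping an already-signed transaction with a higher fee, without touching the inner transaction:

```typescript
import { Account, Asset, BASE_FEE, Keypair, Networks, Operation, TransactionBuilder } from "@stellar/stellar-sdk";

const payer = Keypair.random();
const sponsor = Keypair.random();
const account = new Account(payer.publicKey(), "1"); // placeholder sequence number

// An ordinary transaction whose fee turns out to be too low.
const innerTx = new TransactionBuilder(account, { fee: BASE_FEE, networkPassphrase: Networks.TESTNET })
  .addOperation(Operation.payment({ destination: sponsor.publicKey(), asset: Asset.native(), amount: "10" }))
  .setTimeout(300)
  .build();
innerTx.sign(payer);

// The sponsor wraps innerTx unchanged and pays a 10x fee on its behalf,
// yielding the two transaction IDs (inner and outer) noted above.
const feeBumpTx = TransactionBuilder.buildFeeBumpTransaction(
  sponsor,
  (Number(BASE_FEE) * 10).toString(),
  innerTx,
  Networks.TESTNET,
);
feeBumpTx.sign(sponsor);
```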
+- Goal today is to discuss alternative solutions (as opposed to criticism of existing solutions), given the requirements that are extensively listed [here](https://docs.google.com/document/d/1_3KcBTbF7Diu_wu-ArvajYhEcPoDp9SgK5-dNruOK3c/edit).
+  - Deposits have to finish without additional user clicks, even when the recipient’s account is uncreated or lacks a trustline; nor should the user need to fund extra XLM.
+  - The send workflow should credit the intended account ID even if no trustline exists, so clients don’t need to coordinate new trustlines for every payment.
+  - The document’s overall goal is usability: keep issuer/wallet flows simple so developers actually ship the solution instead of avoiding a hard protocol change.
+
+### Jon proposal
+
+- Trying to solve the add-balance problem, and the merge problem related to trustlines.
+- We add a concept called a mergeable trustline.
+- Anyone can send one to any account; only the account owner can merge it into a "classic" trustline.
+- The idea would be written up as "CAP-XXXX (Pending Trustlines)" and must not break account merge.
+- Carries its own reserve and uses union-find to merge accounts, giving O(log n) performance for merges (not the most efficient version of union-find, but still performant; see the sketch below).
+
+### Summary of mergeable trustline details
+
+- Mergeable trustlines are associated with accounts via a variant of a union-find data structure (see https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-046j-design-and-analysis-of-algorithms-spring-2012/lecture-notes/MIT6_046JS12_lec16.pdf) that does not include the path-compression optimization.
+- Mergeable trustlines are immutable, except that the balance can be transferred in its entirety to the "real" trustline for the corresponding asset on the same account (noted in the meeting that this might be relaxable).
+- Each mergeable trustline carries an amount of native asset equal to the base reserve at the time it was created, and this is the entire reserve requirement of the mergeable trustline.
+- An account can have an arbitrary number of mergeable trustlines associated with it, even multiple mergeable trustlines for the same asset.
+- Sending a mergeable trustline never fails if the receiving account exists.
+- Merging accounts never fails due to the presence of mergeable trustlines.
+- Open questions included what happens if the sender owns the trustline, or if the sender merges their account, creating issues around locking accounts and outstanding trustlines.
+- Revocability should not be done on-ledger; it should be handled via pre-authorized transactions or other multi-phase protocols. If it is on-ledger, it should be irrevocable.
+
+### Nico's caveats
+
+- The same complexities arise as with SEP-0016: clients must track incoming mergeable trustlines, decide whether they matter, query them from Horizon, and take actions.
+- This puts a significant burden on clients to download and reason about many mergeable trustlines.
+- Union-find might help reduce what needs to be downloaded, potentially in tandem with SEP-0016.
+- David, Nico, and Jon's proposals all rely on some form of "incoming trustline" at the core layer, which does not appear substantially simpler than ecosystem-level approaches.
+
+### SEP-0016 thoughts
+
+- Defining the last step is difficult given current protocol constraints and would require "shrink to fit" and other small core changes.
+- SEP-0016 should likely remain a living document that evolves alongside core capabilities.
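+### Union-find sketch (illustrative)
+
+A minimal sketch (not from the meeting) of the data-structure variant referenced above, union by rank with no path compression, to make the claimed O(log n) merge cost concrete. The class and names are hypothetical:
+
+```ts
+// Illustrative only - not protocol code. Union by rank *without* path
+// compression, matching the variant referenced above: find() never mutates
+// the structure, and union-by-rank alone bounds tree height at O(log n).
+class UnionFind {
+  private parent = new Map<string, string>();
+  private rank = new Map<string, number>();
+
+  makeSet(id: string): void {
+    if (!this.parent.has(id)) {
+      this.parent.set(id, id);
+      this.rank.set(id, 0);
+    }
+  }
+
+  // Walk parent pointers to the root; O(log n) thanks to union-by-rank.
+  find(id: string): string {
+    let cur = id;
+    while (this.parent.get(cur) !== cur) {
+      cur = this.parent.get(cur)!;
+    }
+    return cur;
+  }
+
+  // Merge two sets, e.g. folding a mergeable trustline into the set owned
+  // by the receiving account. Never fails, mirroring the bullets above.
+  union(a: string, b: string): void {
+    const ra = this.find(a);
+    const rb = this.find(b);
+    if (ra === rb) return;
+    const rankA = this.rank.get(ra)!;
+    const rankB = this.rank.get(rb)!;
+    if (rankA < rankB) {
+      this.parent.set(ra, rb);
+    } else if (rankA > rankB) {
+      this.parent.set(rb, ra);
+    } else {
+      this.parent.set(rb, ra);
+      this.rank.set(ra, rankA + 1);
+    }
+  }
+}
+```
+
+Skipping path compression keeps `find` read-only, which is plausibly why the proposal omits it: lookups against ledger state then never require writes.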
+
+### Follow-up actions
+
+- David to put together a list of constraints and concerns blocking [CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md) from moving forward with Nico.
diff --git a/meetings/2019-06-13.mdx b/meetings/2019-06-13.mdx
new file mode 100644
index 0000000000..148597fe15
--- /dev/null
+++ b/meetings/2019-06-13.mdx
@@ -0,0 +1,23 @@
+---
+title: "Preconditions and Fee Bump Finalization"
+description: "This overview highlights network fees and resource limits."
+authors: johnny-goodnow
+tags: [legacy, CAP-15, CAP-21]
+---
+
+## [Public Discussion](https://groups.google.com/g/stellar-dev/c/I1GAjjcfDA0/m/CIzRMI-FBAAJ)
+
+### Announcements
+
+- [CAP-0021](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) and [CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md) moved to FCP: Acceptance.
+
+### Agenda
+
+- David's Pre-Condition proposal: [CAP-0021](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md)
+  - After several iterations with the Channels working group, CAP-0021 was presented for decision.
+  - Following discussion on its importance for payment channels and its fit within the current framework, the group agreed to move CAP-0021 to FCP: Acceptance.
+- Clarifying final aspects of fee bump ([CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md)), including questions raised in Nico's latest pull request:
+  - https://github.com/stellar/stellar-protocol/pull/323
+  - CAP-0015 will move to FCP: Acceptance after a final round of wording changes.
+- If time permits, Jon to give an update from the trustlines/balances work.
+  - Decision made not to discuss this fully until the draft is finalized, particularly around rationale and alternative ideas.
diff --git a/meetings/2019-06-27.mdx b/meetings/2019-06-27.mdx
new file mode 100644
index 0000000000..22542b9618
--- /dev/null
+++ b/meetings/2019-06-27.mdx
@@ -0,0 +1,75 @@
+---
+title: "Two-Part Payments and Preconditions"
+description: "This overview highlights transaction data."
+authors: johnny-goodnow
+tags:
+  - legacy
+  - CAP-15
+  - CAP-21
+  - CAP-22
+  - CAP-23
+---
+
+## [Public Discussion](https://groups.google.com/g/stellar-dev/c/va9Y7lkP4uI/m/K2W_AkvzCQAJ)
+
+### Announcements
+
+- [CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md) and [CAP-0021](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) move to _Accepted_ today (after this meeting), barring any new concerns.
+- [CAP-0021](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) moves back to _Draft_ in order to resolve [CAP-0022](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0022.md) prior to acceptance.
+
+### Agenda
+
+- [CAP-0023](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md) - _Two-Part Payments with BalanceEntry_
+  - Focused on temporal separation between initiating a payment and receiving a payment.
+  - Explicitly **does not** replace existing payment operations.
+  - **David's initial framing:** ~10% additional complexity yields significantly more utility.
+
+#### CAP-0023 Design Discussion
+
+- **AuthorizeBalance**
+  - Should include an optional `AccountID *revocableBy`.
+  - Small increase in complexity but worth it for added control.
+- **Threshold semantics**
+  - `CreateBalance` should use the _medium_ threshold.
+  - Authorization should mirror AllowTrust.
+  - Claiming at a _low_ threshold may be acceptable, since low-threshold keys cannot otherwise act.
+- **Operation naming**
+  - Consider appending `Op` to operation names (XDR style / consistency).
+- **NATIVE asset semantics**
+  - AuthorizeBalance behavior for `NATIVE` assets is unspecified (invalid vs no-op).
+  - Might be better to factor shared data structures out of both AuthorizeBalance and AllowTrustOp.
+  - AuthorizeBalance operates by ID and is unaware of the asset of the BalanceEntry; this structure may still be useful for AllowTrust.
+- **Why AuthorizeBalance exists**
+  - Needed to authorize _both_ the account and the payment itself.
+  - Necessary for deauthorization when assets are sent to bad actors.
+  - Important for payment channels to ensure all parties are authorized before payout.
+- **ACCOUNT_MERGE interaction**
+  - BalanceEntries resemble hyper-specialized deterministic accounts.
+  - They do **not** participate in merges and may be abandoned (with their own reserve).
+  - This behavior should be explicitly documented in the CAP.
+- **Global revoke scenarios**
+  - In cases where assets must be returned for replacement, funds locked in BalanceEntries pose challenges.
+  - AuthorizeBalance enables trustline authorization prior to account creation.
+  - Even if authorization is revoked, reclaiming balances should still be possible.
+- **Extensibility**
+  - Consider extending `claimBy` using a union to allow future claim-by-signer functionality.
+- **UX considerations**
+  - Some concern that less-knowledgeable users may struggle with the flow.
+  - Others believe wallet UX and the existing payment operation mitigate this risk.
+
+### CAP-0021 / CAP-0022 & Payment Channel Implementation
+
+- [CAP-0021](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) - _Generalized transaction preconditions_
+  - Moved out of Final Comment Period back to Draft due to concerns raised by CAP-0022.
+  - May be implemented separately (branch or protocol version) for payment channels.
+  - Preference is to resolve outstanding issues before acceptance.
+- [CAP-0022](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0022.md) - _Invalid transactions must have no effects_
+  - Open question: adopt CAP-0022 as written, or pursue a stronger invariant (only fully executing transactions succeed).
+
+### Follow-Up Actions
+
+- [CAP-0023](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md) (Jon)
+  - Add explicit behavior details regarding `ACCOUNT_MERGE`.
+  - Evaluate low-threshold usage for `ClaimBalance`.
+  - Append `Op` to operation names in XDR.
+  - Extend BalanceEntry claimer via a union to support future signer-based claims or wildcard claimBy.
diff --git a/meetings/2019-07-25.mdx b/meetings/2019-07-25.mdx
new file mode 100644
index 0000000000..0c9ab61fa1
--- /dev/null
+++ b/meetings/2019-07-25.mdx
@@ -0,0 +1,28 @@
+---
+title: "Path Payments and Preconditions Progress"
+description: "This overview highlights protocol updates, tooling demos, and ecosystem discussions."
+authors: johnny-goodnow
+tags:
+  - legacy
+  - CAP-15
+  - CAP-21
+  - CAP-22
+  - CAP-24
+  - CAP-25
+---
+
+## [Public Discussion](https://groups.google.com/g/stellar-dev/c/a9JH2c3jTEw/m/k5wg4XNnDAAJ)
+
+### Agenda
+
+- [CAP-0024](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0024.md) - Path Payment w/Fixed Amount
+  - Jon to follow up with Morley to get a wallet implementer's perspective on how this feels, particularly between the two operations.
+- [CAP-0015](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md)
+  - Implementation question around removal of one-time signers, which currently does not take place.
+  - Consensus: preserve existing behavior for now and discuss behavior changes separately.
+  - Jon to follow up with Nico when he returns to better understand existing behavior.
+- [CAP-0021](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md), [CAP-0022](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0022.md), [CAP-0025](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0025.md) - Preconditions, Invalid Transaction Effects, Shadow Bucket Removal
+  - CAP-0025 moves to Final Comment Period (FCP): Acceptance.
+  - CAP-0021 is dependent on CAP-0022.
+  - CAP-0022 must address additional feedback from Nico.
+  - David to own follow-up on CAP-0021/0022 dependency resolution.
diff --git a/meetings/2019-11-04.mdx b/meetings/2019-11-04.mdx
new file mode 100644
index 0000000000..80dcab8a40
--- /dev/null
+++ b/meetings/2019-11-04.mdx
@@ -0,0 +1,115 @@
+---
+title: "SDF's Next Steps"
+description: "This overview highlights validator operations and anchor services."
+authors: [denelle-dixon, jed-mccaleb]
+tags: [community]
+---
+
+import YouTube from "@site/src/components/YouTube";
+
+
+
+Denelle Dixon closed day one of Meridian with a forward-looking address on SDF’s evolving role and Stellar’s long-term direction. Speaking from the perspective of her first six months leading the foundation, she emphasized SDF’s identity as a nonprofit steward and ecosystem convener, translating open-network principles such as transparency, interoperability, and trust-by-design into practical support for builders, institutions, and regulators.
+
+The talk focused on aligning governance, resources, and community momentum as Stellar moves from early growth into a more decentralized and durable network. Dixon outlined what has worked well so far, where SDF has fallen short, and how a restructured treasury and clearer priorities are meant to support sustainable adoption while reducing the foundation’s centrality over time.
+
+### Key Topics
+
+- What SDF is (and is not): a nonprofit with no shareholders or dividends, focused solely on long-term network adoption and ecosystem health.
+- Decentralization progress: increasing validator independence and a maturing governance feedback loop from community discussion to protocol change.
+- What’s working: scalable technology, a growing global team, stronger community coordination, and expanding ecosystem projects.
+- What needs improvement: Stellar-wide marketing, policy and regulatory engagement, ongoing transparency, and more effective lumen distribution.
+- Treasury reset: announcement of a 55B XLM burn, with remaining resources reorganized to better align with mission and execution capacity.
+- New funding buckets: operational runway with a multi-year release plan, ecosystem and infrastructure support, currency and liquidity support, use-case investment, and user distribution through applications.
+- Enterprise Fund model: the ability to invest in or acquire companies to accelerate priority use cases, with returns recycled back into the fund.
+- Regulatory approach: emphasis on education and consumer protection, positioning the network layer as internet-like infrastructure.
+- Q&A highlights: sustaining SDF beyond the initial multi-year plan, improving DEX liquidity, overcoming network-effects challenges, environmental advantages versus mining-based chains, and inclusion as a requirement for mainstream adoption. + +
+ Video Transcript
+
+[00:00] All right, everybody. It's been a long day but I hope a very productive one, and in closing the first day of Meridian, I am so pleased to present our next and final speaker, Denelle Dixon, the executive director of the Stellar Development Foundation. So thank you all very much and let's give a really warm welcome to Denelle.
+
+[01:00] [Applause] Hi everyone, good afternoon. I'm back, as they say. So I thought I would start by just mentioning that I joined SDF six months ago, last Friday. So it's been a journey for me. I can't believe that it's been six months and yet, at the same time, I can't believe that it's only been six months. So there's a lot that I've learned about all of the work that you're doing, a lot that I've learned about Stellar, and it's been pretty phenomenal. So I worked at Mozilla for seven years, and at Mozilla, which was my role just prior to this, I was the chief operating officer. I did a lot of work focused on
+
+[02:00] the open web, focused on ensuring that governments didn't have access to technology with backdoors, talking about encryption and privacy and all of these issues that are core tenets of the open web: transparency. And so when Jed came to talk to me about this role as the executive director and CEO, I had to reflect on whether all those things that I'm so passionate about - openness and transparency and fighting for the rights of the humans that actually use the content side of the web - could be equally translated over here to what we're doing here at SDF and what we're doing with Stellar. And the good news is, they can. So it's been a pretty wonderful transition for me to see all of the work that has already been done, and, frankly, a lot of the work that's done with respect
+
+[03:00] to blockchain technology doesn't get highlighted enough. I don't know if that's because of fear or ignorance, because of what regulators are concerned about or because of history, but one thing I'll have to say is that I think now is the time we need to actually really focus on getting more regulators involved, understanding what we're doing with blockchain. Now is the time for us to come together and to develop - excuse me, to develop - this community and to get all those out there to become part of it. I actually love one part of what I've done over the last six months in particular, which is talking to those folks who have concerns about blockchain and about Stellar in particular - those naysayers out there that don't really maybe understand the technology - and one thing I'll say is that you can overcome a lot of those concerns when you have facts and when you have momentum, and I think we have all of
+
+[04:00] that here. And so I've had a lot of success talking to folks, maybe even to some of those folks that I wanted to bring on to SDF to be part of what we're doing here, to really engage them in the value that we have in this ecosystem and what we can do and what actually Stellar and blockchain generally can deliver, not just in financial services, but outside of it. So the underlying tenets of the internet, all those things that I really fought so hard for over the last 15, 17 years - I think they're actually more true in this space than in any other right now: standards, openness, the focus on not creating trust through contracts but creating trust through transparency, creating trust that exists because you can actually see, touch and feel. Those are the things that the internet was based on. You know, back in the day with security, they used to talk about: you don't have to trust me, you can audit the code. The same is true in this space, even more so today
+
+[05:00] than it is on the content side of the web, and that's really exciting to me. And I think, as a result of that, we just have so much that we can do in this space, but we have to continue to focus on interoperability, standards, openness, transparency, because if we do that it's really hard to fight against the work that we're trying to do here. So I want to just mention again - I talked this morning a little bit about SDF's role as the connector, as the one bringing folks together, the convener. And because of the growth of Stellar and what we've done here as a collective, it feels a lot like we're able to do that not just through things like Meridian, but through all of our work. And so I reflected a lot on the role of Stellar as we think about what it is that we're doing here, what it is that
+
+[06:00] the role of the Stellar Development Foundation is as we get through this. And I think it's really important to talk about the Stellar Development Foundation itself instead of Stellar, because many of you know that we shepherd the codebase, that we do lots of things, but many of you probably don't know the details of what SDF is, and so I thought maybe I would go into that a little bit. First, we're a non-profit. Why is that important? We don't have shareholders. Why is that important? There is not one person who can actually benefit and who actually gets a dividend or a payout with respect to anything that the SDF does. That's important because that should build trust, so that's a really important fact that I want you to think about. The other thing is we're a taxpayer. We actually pay taxes in the state of California and at the federal level, and we're not a charity. And finally,
+
+[07:00] all of the capital that we have at the SDF is actually used to support Stellar and make that work successful. So we talk about this as no shareholder value, no dividends, no EBITDA, no stock price focus, none of those things that you talk about in corporate America. We don't have those things. All we have to think about is making Stellar the global payment standard. That's our daily goal, that's our quarterly goal. So what we do is each of these things: we spend our time building and supporting Stellar and the ecosystem, we create awareness and education around Stellar, and we develop and support Stellar use cases. These are the things that we
+
+[08:00] do today. Some we've actually done better than others, but this is what we focus on. We have - it says 54; it's actually wrong, we have 55. We just hired - and he starts on Wednesday - our chief marketing officer, which I know many of you have been clamoring for us to get. We just hired him. He starts on Wednesday, so it's 55. These are some of the titles of the employees at the SDF. We're across eight countries. We have an office in Brooklyn, New York, and we have people who are located, either as contractors or employees, all over the world. And this is important because our work is representative of what all of these people all over the world do. We want to be here for a really long time supporting all of your work, and so this is something that I think is good for you to know, so that you can access us, you can talk to us. We have folks that are focused on integration alone, and these are important jobs and important roles
+
+[09:00] for the SDF to play. So now, about the role of the SDF: in the talk I did talk a little bit about that, as the convener and the connector, and I mentioned Meridian. And I really do think of the work that we do as bringing the whole globe together - so all of you, together - to be able to focus on the work that we have ahead of us, which is really creating Stellar as the global payment standard. So there are things that we think through about what our role is today, like how important is the SDF to the ecosystem? I'm going to tell you that the role of SDF five years ago is very different than the role it has today. We're no longer at the center of the ecosystem, which is super awesome, and this is one of the representations: the
+
+[10:00] vote that occurred on October 28th regarding the inflation mechanism is an important example of demonstrating the decentralization of the network, but also the role that SDF plays. We actually spent time looking at the dev forums. We saw the conversations on Reddit, on Keybase, and it's our role to sort of take the output from that and make a proposal, which is what we did. And then the vote happened on October 28th and it fundamentally changed Stellar, the network. And it was really cool because it definitely shows that feedback loop and how that feedback loop works. We heard what you had to say. We put a proposal out based on the feedback, and the network - the validators - accepted that proposal. Another important way to determine our role is to think about Franklin Templeton. We learned about Franklin Templeton's plans to build a mutual
+
+[11:00] fund on Stellar not from them. We got it from a Google Alert, of all things, and it demonstrates - for me, it was one of the more exciting days, because it was like: this is the way that this should work. Things like this should happen without people having to come to meet with us and figure things out. That means that our documentation is better. It means that they have determined the value of Stellar on their own. That's exactly the kind of thing that we want to see in a decentralized network. And then, if you look at the nodes, and just to think about the validator networks today: two years ago we couldn't pull our validators off the network because it wouldn't continue to run. Today it can, and this is going to get even better and better as things progress. So, from our standpoint, this is a pretty cool place for us to be, and our role is really solidified by all of you and the work that all of you have done, as that convener, as that connector. So as we started
+
+[12:00] to think about all these things and think about our role, we came together to think about what the next 10 years look like for SDF, and how do we actually ensure that we're doing our job, which is to make Stellar successful, to make it the payment standard? So, not surprisingly, in doing this work we had to reflect on what's working and what hasn't worked as much as we would have liked. So what's working? Our tech. It's scalable. You guys are all contributing to it. Our people. We've built an awesome team of folks who are not only committed to the mission of SDF but are committed to Stellar, and that's awesome. Our community building. Look at Meridian. You guys all came here to Mexico City to join us to celebrate all the work that you have done:
+
+[13:00] many excellent and promising projects like IBM, Axio, reality bits, SatoshiPay, Lobstr, StellarX, so many more. And our focus on decentralization - as I just went through, we feel like we've actually kind of cracked that nut. So what hasn't been working so much? Our marketing. Some of you may say: what marketing? So we just hired our CMO. Yeah, and it's not marketing for SDF, it's marketing for Stellar, marketing for all of you - partnership marketing as well as network marketing. Our policy and our focus on regulatory efforts: we've done so much better over the last six to eight months and we're gonna get so much better. We have a General Counsel now who's focused on this, and we also are hiring a head of policy who's going to be focused on that from a global standpoint. Our lumen distribution through
+
+[14:00] grants and airdrops: we've seen some pretty awesome results, and yet we still hold a lot of lumens. So, from our standpoint, it's not working as well as we'd like. And our transparency around SDF: we tried to fix some of that today, but you can't just do it once and let it sit there. We've got to continue that. So we're gonna focus a lot more on telling you more about what we're doing with the capital that we have - that wasn't me - and about what we're doing just holistically as an organization. That's our commitment to you. So, in reflecting on all of this and thinking about the ten years, we really refocused our mission. I mentioned this earlier today: creating equitable access to the global financial system. It's an awesome mission, as I mentioned - I mean awesome in the sense that it's big - but we're committed to it, and we want to use not just Stellar, but all
+
+[15:00] the work that you guys have done, to make all this happen. And as we've reflected on it, we also thought about what are some of the pillars that we need to put in play to help us say, yes, this is the thing that we should do, and then to help us say, nope, we shouldn't do this thing, it's not consistent with what we need to focus on. So we put these pillars together and we're gonna flesh them out as an organization in more detail, so that we can put our own sort of goals and operational support behind them. But this is how we want to be seen. We want to be the blockchain that people know and trust - not for our own good, but for the good of all of you. We want you to be comfortable building on it and knowing that you're gonna be able to answer to whatever regulatory bodies you need to answer to. We want you to feel comfortable knowing that you can trust the network and that, therefore, you can tell others. We want to ensure robustness and usability of Stellar. Robustness can encapsulate so many things:
+
+[16:00] security, stability, scalability - all of those pieces. Foster and develop real, sustainable Stellar use cases: some of these we'll do ourselves; a lot of them you're gonna do, and we're gonna help you in ways that we have, and we're gonna help you by marketing in ways that we maybe haven't, because we want this to be successful. So these are our pillars, these are the things that we think we need to focus on, and we think that these things can actually help us say no to many things. So now, how do you make the plan happen? Let's first focus on our resources. Whenever we spend money at SDF, whether it's to hire an engineer, to put together a marketing campaign, to do any of those things, we have to ask ourselves: is this spend moving us closer to the adoption of Stellar as the global payment standard?
+
+[17:00] We consider that every time that we do this. So this is the question that we ask ourselves with every hire that we make, with all the different things that we do in the organization. And then we think about our resources. And, as many of you know, this is what our resources looked like before I started this talk. So there were 105 billion lumens that existed in the world. 20 billion of them were out in the world, 17 billion were set aside for SDF's operations, and 68 billion were reserved for the giveaway programs that were administered by the SDF. Stellar isn't mined, as most of you here know. So the lumens that are now in public hands - we worked really hard to get them into public hands, and we did that over the last four and a half years. As for
+
+[18:00] the other two allocations, there's been lots of thought about how we can distribute these and how we need to work on them. But the network and the community around Stellar are different than they were when we first started. They're much more robust, and SDF can carry less weight now because of the way the network is. So we're just a piece of a much larger whole, and the funds that we have need to reflect that. And so, right before this talk, we burned 55 billion lumens. And the way that breaks down is that we burned five billion lumens from the SDF operating fund, so we have 12 billion there now. We also eliminated the Stellar world giveaway program
+
+[19:00] and the partnership program, and so 50 of those 68 billion lumens were also burnt. So these are the remaining SDF lumens that exist as of this talk. We believe that the number of lumens now better aligns with our mission. We're not gonna burn any more lumens - that's an important point. So, all told, as of now, there are 50 billion lumens that exist in the world. Approximately 30 billion are in the hands of SDF - and I'm going to go through how we broke those out - and 20 billion remain in the public. This new mandate
+
+[20:00] reflects our desire to do more of what has worked at SDF and for Stellar, and less of what hasn't worked. So let's dive a little deeper into this from an operations standpoint. You know, it wasn't until really this year that we started hiring human capital, which, frankly, is the most important capital that we have at the SDF. It's the team of 55 people that do all of the work to help to support Stellar. And so many of the folks that were out there in the world that would have before maybe received grants - partnership grants - are now employees of the SDF and are committed to the mission of Stellar - excuse me, the mission of the SDF - and to supporting Stellar. And so we're going to continue to exercise that same standard that I mentioned before, which is, as we're spending
+
+[21:00] these lumens on our operations, on the human capital, on the office space, on marketing, on all of these different things that we need to do: is it moving us closer to Stellar becoming the global payment standard? So we're going to continue to do that with the 12 billion lumens that we have. We're also going to - and we actually are going to issue a blog post right at the end of this talk, which has a lot more detail, so that you can read about it - but we're also going to lock these lumens up, three billion a year for the next four years, so that the SDF lumens that we use for operations will release via that mechanism over that time period. So let's talk about the partnerships, what was previously the partnerships program. We've broken that up into two. So we have the ecosystem support: we have a lot of great
+
+[22:00] work that's been done with respect to infrastructure grants and the community fund, and those are going to continue with 1 billion of the lumens that we have now. I should have also said that each of these is going to be set in accounts that are going to be public, so that folks can see them, and you can see when lumens are being utilized for these different projects or these different programs that we have. And so we also have currency support, for things like anchors or supporting liquidity in the market. These are things that'll come out of a currency support bucket of 1 billion. And then we have this new program that is the use case investment, where we've set aside ten billion XLM. And what that's for is: 2 billion of it will be used for the SDF to be able to build new products and use cases, as we help to fill in pieces of the network that we think need the support. And that will also have
+
+[23:00] its own account and be visible to folks as we start using the funds. And then the Enterprise Fund, which is a slight twist, and I think a better way to think about partnership grants. Because what we can do with this Enterprise Fund now is we can actually take equity in companies, we can acquire companies and we can hold them as part of this Enterprise Fund, but we can actually have more control and ability to dictate what happens with respect to Stellar and their use of Stellar and the movement of those companies onto the Stellar network. And any companies that we acquire will be held in the name of SDF, but any monies that are collected as a result of it will all return back into this Enterprise Fund. Remember, we're a non-profit with no shareholders, so there's no entity, there's no one person here who gets access to any of the dividends from this. But I think this is a really interesting way for us to be able to
+
+[24:00] help to support the ecosystem in a different and new way. And then the old giveaway program, which, frankly, I think had some really wonderful success - but again, to get 68 more billion lumens out there in the world was just going to be hard, given what we've already seen and the challenges that we faced, some of them just technical challenges and some of them just getting them into the hands of humans. Well, four billion of those are going to continue to go into the hands of humans. We're gonna focus on using applications that are out there building on Stellar: we can put funds into those applications so that they can actually just get into human hands and be useful to the network and to all of the work that you're doing. So that's that part of it. And then the two billion, as I mentioned, is marketing support, and that's marketing for Stellar, but also marketing for the partnerships that come and build
+
+[25:00] on Stellar - those businesses, those developers who are focused on building on Stellar. We're gonna work with them - you, all of you - we're gonna partner with you to actually promote all the work that you're doing. So that's where that's gonna go. So here is what it looks like, and again, there will be a blog post too, so that you can take a look at it; this isn't the last time you'll see this. And then one question that I think we're gonna have to answer is: why burn at all? Well, we didn't start by saying we wanted to burn. We actually started by saying: what do we need? What does Stellar need, and what does SDF need to be able to provide those things to Stellar? We had a very focused arrangement around only keeping what we actually could use, because part of the challenge that
+
+[26:00] we've had - and this weighed not just on Stellar, but on the ecosystem - was that as much as we wanted to use the lumens that we held, it was very hard to get them into the market. So that was the first premise of why burn at all. And the second one is: you don't derive a plan from some arbitrary number. And so if we just took the 105 billion lumens and then tried to derive a plan from it, that's not actually serving the need that we have to fill for the ecosystem. So we did a bottoms-up analysis and we thought: what is it that we need, and then how do we get there? So that's how we approached it, and we were very careful in terms of plotting the next ten years. So now that we have our mission, we have our pillars - these strategic pillars that can help us to say yes and no - and we have all of you and all the work that you're doing, and we have
+
+[27:00] right-sized the amount of capital that we have available to us and to the ecosystem, we feel like we're poised for success. So I want to say again thank you to all of you for coming here to Mexico City to join us at Meridian. Thank you for being a part of our ecosystem. If there are any naysayers out there in the crowd, I'm happy to talk to you and convince you with the facts, the evidence and the momentum of us moving forward, because I'm convinced. And we're going to call that an afternoon. So now I'm gonna bring Jed up here and we're gonna allow questions. If any of you have questions, we have about 10 minutes or so. We're taking away your happy hour time, but this is important. We'd like for
+
+[28:00] Meridian to be an annual conference. Yes, here's the microphone. Go ahead, now that it's on. Now that you have right-sized, as you said,
+
+[29:00] the amount of capital, and you have vested those 12 billion over the next four years, the natural question is: what happens after four years? Well, I think that what we did is we locked them up, three billion a year for the next four years. That doesn't mean they're all gonna be put into the public at that point. I mean, honestly, the goal of the SDF was to be able to put those out as quickly as possible, and if there's some way that we could get them out there, that would be beneficial to the network. But for the ten years - you don't just blow all your capital in a four-year period. You've got to think about treasury, got to think about how you're gonna sustain an organization like this. So that's our job, to figure that part out. It doesn't mean that four years from now we just say, all right, we're closing it down. Absolutely not, we're in this for the long haul, but we just need to think about how we're gonna do that towards the future. One of
+
+[30:00] the most important parts of Stellar is the decentralized exchange, but I think the liquidity is so low. Are there any plans to incentivize market makers? We actually already started making a market between Nigeria and the US, and that's part of the liquidity piece that I mentioned. Yeah, I mean, we try to incentivize that to some degree. I mean, a lot of it will come from just getting more users on exchanges like Stellarport or StellarX, things like that. But yeah, it's a process. So several years ago, you made a concerted effort to form a non-profit - I think all of us agree that was the perfectly right decision. So what do you think is the biggest challenge and opportunity going forward to actually create an open global financial system? Well, there's a lot of challenges. I
+
+[31:00] mean, obviously that's a very big mission. I mean, I think one of the first things that we're trying to address is how do we achieve network effects, because this technology is not very useful unless there's a lot of different participants. So how do we get the first use cases on? And that's a lot of what we'll talk about tomorrow. So, yeah, I think that the - we call it the chicken-and-egg problem - is the largest problem. But I feel - I don't know, and maybe I'm just drinking my own Kool-Aid here - but I do feel like we're on the precipice of success for all of those things, and then you just need to get user adoption. So I feel like we all can come together to try to make that happen. But I do think it's this whole network - like, building the network in a way that is gonna bring everybody to the table at the same time. It seems like global payment rails and remittances are kind of the core use case Stellar has been solving very well. I was attracted to it because of the developer docs; I thought they were
+
+[32:00] phenomenal. So what do you guys hope to see the community develop - maybe a consumer application, or what are some non-remittance or global-payment-style apps that you'd like to see built? Yeah, I mean, there's a whole host of things you can do on Stellar, and I think one of the most exciting things for me is hearing about projects that I didn't anticipate or imagine before. Like, for instance, today someone showed me this app where you can have people bid on advertising in a space - like, we could have a thing here and people could bid on it - and things like that, just kind of novel things that you don't really think of, or are not that easy to do, prior to Stellar. So I don't really have anything in particular in mind, other than I just know that I'll be surprised by the things that people build on there, and I'm excited by that. So I think one of the things we could do better from the community standpoint - because I don't have an application in specific - is the community working together to get to know more of what's happening in the community, because I actually think that all of you can support one another once you find out what's happening on Stellar, like
+
+[33:00] this thing that he just heard about - we can actually build those things together. So we're gonna do our own brand of bringing the community together. But I do think it's about being inquisitive, as I mentioned this morning. Why do people build on Stellar is a good question to ask. But also, what are they building? And I think once you find that out, you can see that network effect happen when you actually think about it from the community standpoint. All right, so we've been part of the community since 2016 and we've built a platform on top of Stellar. Now, we were based in London originally - we've just relocated to Amsterdam - and one of the things we still keep getting when we attend events or networking events in the space is: we mention we're built on Stellar, and many people go: why Stellar? Or we get the: what is Stellar? Is there any plan in the marketing for more standardized, or, I guess, better arrangements for companies such as ours, or even the developers who
+
+[34:00] actually are attending events or want to arrange events, to sort of promote Stellar with official support from the SDF? Yeah, I mean, that's part of the marketing piece. It's also just part of the questions that we need to ask ourselves at SDF and ask all of you, as I just mentioned: why build on Stellar? It's different for every use case; it's different as to the why. There are some things that are consistent, you know - the speed, the security, the scalability, all those things are consistent - but we need to figure out why it works for your use case. And we need to promote that, and so get people talking about that. So that is part of the work of our chief marketing officer that we just hired - super happy; we actually closed the hire on Saturday and he is going to start on Wednesday. So we'll have a lot of focus on that, and we'll have it in materials that you, that everyone, can use. It's not secret to us. This is beneficial for the ecosystem at large, so we want to help you with that.
+
+[35:00] Ah, could you tell us a little bit more: how does the regulatory thinking fit into some of the roadmap and the plans for Stellar and the SDF? I would be curious to hear a little bit more about some of the potential hurdles you foresee and what you are doing to address those. So I'll start, and Jed can jump in on this. From the standpoint of regulatory, part of the hurdle that we have is that we actually haven't done the best job of it before. I think we've gone to DC now, since I've been here, three or four times. And when we do, and we talk to policymakers and regulators - and this is just in DC, and we need to actually make this global, because it's not just this government in the United States that's important; other governments are equally important - when we talk to them about Stellar and the differences, and just even the structural difference between what SDF is and then what Stellar is and what Stellar does, it's kind of like a light bulb to them, because it's unique. One of the things that I
+
+[36:00] learned when I was at Mozilla that was really important is that when I would talk to regulators - and I spent a lot of my career doing that - and policymakers, it was from the standpoint of an entity that cared not just about its own products, but an entity that cared about the ecosystem at large - so the web - and, by the way, we also were building a consumer product in the market. And so it was helpful for the regulatory bodies to understand that. We have a very similar look and feel here. We are a non-profit; we have the notion of not just the success of SDF - because, frankly, SDF would be nothing if Stellar wasn't successful - and so we have the focus on what Stellar is, which is all of you. And so I think that we have an opportunity to get through the first regulatory hurdle, which is the education piece. Because, as I said just a little bit ago, I feel like so much of this is about blockchain and the resistance to it - is it because of history, and a little bit because of fear of the word blockchain? And then that cryptocurrency word gets thrown in there and everybody just doesn't know what to do. And so
+
+[37:00] I think that it's these kinds of things that we need to talk through. The truth of the matter is, the network layer doesn't have to be regulated, and shouldn't be. It's very much like the internet, and all of the entities that touch fiat are already regulated, and so we shouldn't have a lot of regulatory hurdles. But the focus on consumer protection for regulators - not just in the US, but in every other country - is a big one, and we need to tell them why that's okay, why consumers are going to be okay, and we need to show that to them. So one of the things that we pointed to in the pillars is for us to focus on being the blockchain people know and trust, and that trust mechanism comes from also getting governments to understand that we can be trusted - also that this network can be trusted, and the people that build on it have the opportunity to create that trust with their consumers. So I feel like there are hurdles, but we can overcome them. And then, of course, there's regulation, and we just need to focus on the right regulation and really advocate for the good parts and explain why the bad parts don't work.
+
+[38:00] Kind of a different kind of question: do you consider yourself to be an environmentally friendly and responsible enterprise, because of the fact that you're doing something using a technology, an algorithm, that is not as power-consuming as Bitcoin or other technologies? Yeah, I mean, I kind of think about that in the inverse, where the default is to not, you know, burn tons and tons of coal or whatever kind of power needs to power the thing. But yeah, we definitely have way less environmental impact than something that requires mining. Okay - I
+
+[39:00] can't see, like, way up there. This whole place is male-dominated. If you're looking to do more marketing, if you're targeting mass adoption, you are missing half of the population. How do you address that issue in the blockchain space? This is not just a Stellar conference thing; this is everywhere in blockchain. Obviously - and maybe I don't have to say this - this is something that's near and dear to my heart, and it's not just
+
+[40:00] in blockchain, it's actually in tech generally. So I've experienced it for most of my career. So I'll just say: at the SDF, we're hyper-focused on inclusiveness, not just from a gender standpoint, but in lots of different ways that we think about being inclusive, and so we've focused our hiring that way. I think that we would love to see more gender equality in this space. That's a lot of work that you guys need to do. She's right - you look around here: not as many women as we would like to see. We need to get that to be higher. We're gonna do our part to do that by supporting all companies, not companies that are just created and developed by men, but companies that are created and developed by women, and every other kind of diversity that you can think of. So that's our part, but I think it's a request to all of you too. And I think that, in order to serve the whole ecosystem better, you do have to be representative of all the different players out there that are your ultimate consumers in the space, and I gotta say,
+
+[41:00] women are an awful lot of them. So it's work that we can do collectively, and we need to spend the time and effort to do that, and we're gonna do it. So we're committed to that ourselves. [Applause] All right, thanks everyone. All right, thanks so much, everybody. It's given us a lot to talk about over some drinks, which you will find where you had lunch. Thank you guys so much for a very successful day one at Meridian, and have a great time with networking and drinks.
+
diff --git a/meetings/2020-04-10.mdx b/meetings/2020-04-10.mdx new file mode 100644 index 0000000000..968ec60323 --- /dev/null +++ b/meetings/2020-04-10.mdx @@ -0,0 +1,86 @@ +--- +title: "Engineering Talks - Practical Path Payments" +description: "This overview highlights orderbook trading, anchor services, and network fees and resource limits." +authors: kalepail +tags: [tutorial] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Tyler van der Hoeven kicked off the Engineering Talks series with a practical, developer-focused walkthrough of path payments and why they are one of Stellar’s most powerful primitives. He framed the core idea simply: traditional (“vanilla”) payments move assets, while path payments move value. By routing payments through the decentralized exchange and multiple order books in a single atomic operation, Stellar enables complex conversions without users or applications manually touching markets. + +Through live demos, Tyler showed how path payments can be used as market orders inside a wallet, as well as how they scale into more complex, enterprise-style flows. These examples illustrated how trustlines, order books, strict send/receive constraints, and atomic execution combine to make cross-asset payments reliable, composable, and developer-friendly, even when liquidity is fragmented. + +### Key Topics + +- How path payments differ from standard payments by converting assets across the DEX to deliver value, not just tokens. +- Using path payments as market orders inside wallets, including self-payments to acquire new assets without visiting an exchange. +- Sending one asset (e.g., USD) while the recipient receives another (e.g., BTC), with all conversions handled automatically. +- Enterprise-style flows where market makers seed orderbooks (e.g., USD/EUR) and wallets route payments without holding intermediate assets. +- The importance of atomic execution for price certainty and user protection within a single ledger close. +- Liquidity considerations, failed paths, and how strict send/receive settings safeguard users. +- Why market makers and better tooling are critical to scaling path payments and the Stellar DEX. +- The broader implication of transacting in value rather than assets, enabling global payments across currencies as long as markets exist. + +
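+### Path payments in code
+
+The two flavors demonstrated in the talk map onto the strict-send and strict-receive path payment operations. A minimal sketch with the JavaScript `stellar-sdk` (assuming a funded testnet account that already holds the relevant trustlines, and liquid orderbooks; the anchors, keys, and amounts below are hypothetical):
+
+```ts
+import {
+  Asset,
+  BASE_FEE,
+  Keypair,
+  Networks,
+  Operation,
+  Server,
+  TransactionBuilder,
+} from "stellar-sdk";
+
+// Hypothetical actors and stand-in anchors, for illustration only.
+const sender = Keypair.random();
+const friend = Keypair.random();
+const USD = new Asset("USD", Keypair.random().publicKey());
+const EUR = new Asset("EUR", Keypair.random().publicKey());
+
+const server = new Server("https://horizon-testnet.stellar.org");
+
+async function sendValue() {
+  const account = await server.loadAccount(sender.publicKey());
+  const tx = new TransactionBuilder(account, {
+    fee: BASE_FEE,
+    networkPassphrase: Networks.TESTNET,
+  })
+    // "I have USD; deliver EUR": exactly 20 USD leaves the sender, and the
+    // destMin floor protects the recipient if the books move.
+    .addOperation(
+      Operation.pathPaymentStrictSend({
+        sendAsset: USD,
+        sendAmount: "20",
+        destination: friend.publicKey(), // must hold an EUR trustline
+        destAsset: EUR,
+        destMin: "17",
+        path: [Asset.native()], // optional intermediate hops across the DEX
+      }),
+    )
+    // "Market order to myself": spend at most 110 XLM to receive exactly
+    // 5 USD, mirroring the Keybase demo in the talk.
+    .addOperation(
+      Operation.pathPaymentStrictReceive({
+        sendAsset: Asset.native(),
+        sendMax: "110",
+        destination: sender.publicKey(),
+        destAsset: USD,
+        destAmount: "5",
+        path: [], // let the routing fill in hops
+      }),
+    )
+    .setTimeout(60)
+    .build();
+
+  tx.sign(sender);
+  return server.submitTransaction(tx);
+}
+```
+
+Strict send pins what leaves the sender and floors what arrives (`destMin`); strict receive pins what arrives and caps what leaves (`sendMax`). Execution is atomic: if the books cannot satisfy the constraints, the operation fails (for example with `op_too_few_offers`, as seen in the demo) and no funds move.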
+ Video Transcript + +[01:00] Good morning, still our community- or afternoon, wherever you may happen to be evening, I don't know, bill the night. It's worth it to show up, for this is the first of our Stellar engineering talks. My name is Tyler van der Hoeven and we're gonna be talking about practical path payments today. If you have questions along the way or already have questions, we have two places where you can put in your questions. Either on the Stellar events page - `stellar.org/events` - click on the practical path payments and then the bottom right hand corner is a little box where you can ask some questions. Or, if you are tuning in + +[02:00] Via YouTube, you can just ask your questions there in the chat and we will get to those closer to the end. The goal here with our engineering talks is to have relatively short and very pointed discussions around specific aspects of the Stellar blockchain. So today we'll be talking about path payments. So a path payment is a means of it's a payment right. So if you've ever used payments on Stellar or you're essentially just passing a set a from holiday to, why would be that they receive the same asset. So this is very common. This is the way we think about payments today. I have a dollar and I give you that dollar and we've just made a payment. A path payment is quite different and I think I believe path name is to be one of the most interesting and important aspects of this Steller blockchain, and there's nothing necessarily new or incredibly + +[03:00] Different about path payments, but what they allow you to do with their very tight packaging, the feature that is a path payment kind of pulls together a lot of different aspects of payments and orderbooks and allows you to do things that you really couldn't otherwise do without a lot of headache. So, as we start to think about path payments, it really should get your wheels turning on. Oh, I wonder if I could do X Y Z or oh, this really allows something that we hadn't really had the ability or capability to do. So it's this idea of transferring value, not just assets, understanding and making use of one of sellers most powerful Sevilla weapons. A path payment is a payment made through the DEX, the distributed Stellar, distributed exchange or decentralized exchange. That probably doesn't make any sense, but let's kind of walk through a little bit. So vanilla pod payments: we've already our vanilla payments. We've already talked about that a little bit. Traditional, + +[04:00] Standard payments simply move an asset from address a to be nothing fancy. You have any asset and you make a payment to your friend who will receive any of those houses that you sent them. Nothing fancy, they're very essential, but they are simple, basic, boring and I think if we only think of payments in that way, we're missing a huge part of what Stellar allows us to do. So incomes- path payments move your asset. You're given asset through the decentralized exchange before arriving at the final destination. So we'll walk through some examples of what that means. But essentially, if vanilla payments move assets, path payments move value and that's fun. That's a fundamentally important thing to understand when I'm trying to think through what a path payment is: that you have an asset. You're not just trying to move an asset from someone to somebody else, you're moving the value of that asset. 
So whatever is + +[05:00] Behind, whatever that asset allows you to do, whether that's buying groceries, are paying rent or buying a house, or maybe it is the house. Whatever that value is you're, how do I move that value to the third party via the other person that I want to pay as opposed to just moving the asset. So if you have USD as an example here- USD, exilim or a gold token- you make it past a manure and who can receive any of those? Any one of any asset on the orderbook- the value of that thing. So I may pay you with US Dollars. And it goes through these different paths of maybe a pizza token which then goes to a gold token and finally arriving in your wallet as a euro token. This obviously is gonna require some orderbook stuff to make that happen and we'll take a look again in a moment about what those things are. But essentially, you never hold euros and + +[06:00] Your friend never holds USD. In this example that I just showed, never latigo stiva the value of my USD has arrived safely through the asset paths- a pizza and gold in that case to this In this final payment to you. So you foresee the value of my USD. You've just received it in a euro token, so let's take some. Let's take a look at the practical aspect again. You might be thinking that sounds interesting, but I don't know exactly how that helps me, either as a user or a developer of an application. So let's take a look at a market order on TV. So this is something that is very common in other exchanges, where you can just make market orders, right. So inside of our kiba's wallet here we are going to make a payment. This I recently discovered. This is quite fantastic use of path payment. So I'm just going to pay myself. So most the time when we think of payments, we're paying somebody else, but I'm actually going to pay myself. I'm + +[07:00] Going to send other assets. So this would just be the standard payment flow. If we click on send some other asset, I would like to receive U.S. Dollars. Let's say, I want to get five and I want to send- excellent. So this is essentially: I'm going to send 107 XLM to receive five US dollars. And so this will be a market order that essentially, I pay a hundred and seven lumens and I'm going to get in exchange for that five dollars. So this is a market order that I can use to pay myself five dollars without ever touching an exchange. All I did was spend time in a basic wallet interface. In am able to receive other assets- any of these assets that I've created a trust line for. So hopefully that will go through and we will receive some of those tokens. Sweet, we've gotten our five US + +[08:00] Dollars and we spent those US hundred and seven lumens to receive those five US dollar tokens. So fantastic, we've achieved market orders through path names. We never had to touch in exchange and that's the beauty ever right. So an exchange did happen and an orderbook was touched. I just didn't have to do it manually. It happened in the background through the path famous write an order on the order. Book of us of the anchor USD tokens was consumed. A hundred and seven lumens were spent on. Somebody had an order out there that said: I'm willing to take 107 lumens in exchange for five US dollars. I didn't know who they were, I don't care who they are, I just want to get five US Dollars and I willing to spend hundreds of lives to get that. The other obvious example is to make a payment to somebody else and we can use the same key base flow to achieve that. 
+
+[09:00] Now that we've got some US dollar tokens, we can pay. Let's pay our good friend Colton. Send some other asset - let's see what he accepts. Bitcoin. Sorry, buddy, I'm not going to give you a whole Bitcoin - we don't want to make him wealthy off of this - so let's use our five US dollar tokens. Let's see: we have to send 3.4 US dollar tokens for him to get 0.005 Bitcoin. It actually looks like it's consuming a couple of different paths - it's going to go through a couple of different orderbooks, a couple of different order paths, to actually arrive in Colton's wallet as 0.005 Bitcoin. I don't care about any of that. But can you imagine trying to go into all of those orderbooks to actually make this exchange happen yourself? For one, time
+
+[10:00] would be involved, so the rate would probably drift between all those trades - and it's also just very difficult to pull off. You could do it manually and it could work, but wouldn't it be really nice if all of this could just happen in a single payment, never having to touch an orderbook, all from the comfort of my Keybase wallet? Hmm - "too few offers." Looks like our payment failed; probably we took too long to send it. We'll try it one more time. Sometimes these payments can fail if one of the orderbooks isn't all that liquid - I don't know how liquid the Bitcoin market is, and I also don't know how Keybase does its pathfinding. We'll try this one more time; if it doesn't work, I'll just use my lumens again.
+
+[11:00] Looks like that time it succeeded. Nice. So, an interesting thing here: if we go to Colton's address and pop it into StellarExpert - this is on the public network - we should see that he's got himself some Bitcoin tokens that we just paid him from our address, and we can even see the path the payment went through. If we look up this operation, you can see all the trades that were consumed to make it possible - pretty interesting. Again, this is a very basic
+
+[12:00] example of doing something that would be incredibly complex to do manually, and something we do sometimes want: making exchanges through different assets to arrive at a final payment, done very simply through - in this case - the Keybase wallet. So, last but not least, a quote-unquote "enterprise" example. I've got a path payment example demo set up that I use to help explain the enterprise idea of path payments - and when I say enterprise, I really just mean all of the pieces. So we have this market, right? For assets to move - if I want to send the value of US dollars to my friend and have it arrive as euro tokens - that requires orderbooks to be set up for both
+
+[13:00] of those things, both USD and euros. So we'll need to make markets. We have a market maker here: somebody out there who holds USD tokens and euro tokens and is putting them out on an orderbook which can then be consumed, so that this path from US dollars to euros can actually happen. Because it doesn't happen magically - it happens off of orders that are already out there on the decentralized exchange's books.
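+
+The market making the demo needs can be sketched with the same SDK: the market maker rests offers on the books that path payments later consume. The EUR token, the amounts, and the prices below are illustrative placeholders.
+
+```js
+// Rest two offers, making both of the demo's markets:
+// selling USD tokens for XLM, and selling EUR tokens for XLM.
+const eur = new StellarSdk.Asset("EUR", "G...EUR_ISSUER"); // placeholder issuer
+const marketMakerAccount = await server.loadAccount("G...MARKET_MAKER"); // placeholder
+
+const makeMarkets = new StellarSdk.TransactionBuilder(marketMakerAccount, {
+  fee: StellarSdk.BASE_FEE,
+  networkPassphrase: StellarSdk.Networks.PUBLIC,
+})
+  .addOperation(
+    StellarSdk.Operation.manageSellOffer({
+      selling: usd,
+      buying: StellarSdk.Asset.native(),
+      amount: "1000", // 1000 USD tokens on the book
+      price: "21",    // illustrative: asking 21 XLM per USD token
+      offerId: "0",   // 0 creates a new offer
+    })
+  )
+  .addOperation(
+    StellarSdk.Operation.manageSellOffer({
+      selling: eur,
+      buying: StellarSdk.Asset.native(),
+      amount: "1000", // 1000 EUR tokens on the book
+      price: "24",    // illustrative
+      offerId: "0",
+    })
+  )
+  .setTimeout(30)
+  .build();
+```
+
+Sign and submit it the same way as the earlier snippets; path payments crossing the USD/XLM and XLM/EUR books can then consume these offers atomically.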
+
+[14:00] So now we have US dollar and euro markets made. Let's create our wallet and our friend's wallet, with the same example: we're going to hold US dollars, and we want our friend to receive euros. So with our XLM we're going to purchase a few US dollar tokens - you'll notice the books over here start to change. We just spent some XLM, and it looks like we bought some US dollars with it, so we have just under 25 there. And if we click send, it's going to create the trustline on our friend's account, then take the US dollars over to the orderbook here, turn them back into XLM so the payment can go from XLM to euros, and then send those euros to our friend. But we never hold euros, and our friend never holds US dollars - it goes through the books to get there, and our market maker is the one who ends up holding the US dollars and XLM on the other side of the euros. So we'll click that. It creates the trustline on our friend's account and goes through the books - if we look at our books over here, I think we'll see this one change, and... yeah: we've gone through the orderbooks and our friend has received 10 euros. We spent some US dollars to make that happen, and we can see over here that the XLM balance for our market maker has gone up - he's starting to make a little bit of lumen profit. If the spread
+
+[15:00] here is good, market makers can make money off the spread as users send path payments through their books. So that's pretty interesting. Those are three fairly basic, but also very powerful, examples of how you can use path payments right now. If you're interested in seeing the code behind this, it's open source - there will be links in the description of this video where you can go and play around with this enterprise example. Our good friend Colton also wrote up a very good explainer document on path payments, so make sure to go check that out if you have further questions. As for the enterprise example - I'm not going to walk through all of it, but if you want to take a screenshot or spend some time reading, this is our bullet-point list of where the interesting
+
+[16:00] parts of path payments are, as well as an explainer on why I think path payments are so valuable. And there's a very interesting use case here at the end: I would like to see market making actually happen from within applications. When you switch from assets being the thing you want to value being the thing you want - when you can actually transact in value rather than assets - well, why doesn't my bank account light up with Canadian dollars and Australian dollars and euros? Because those things aren't valuable to me: I can't use them, I can't make transactions in them. I can't go to the grocery store and buy groceries with Canadian dollars; I need US dollars. But once you have path payments, I absolutely can make payments with Canadian dollars, because I can make a path payment, and wherever I'm spending my money I just say,
+
+[17:00] "hey, I've got Canadian dollars, but at the end of the day I would like the payment to be received in US dollars." And so long as there are active markets for whatever assets I hold - so long as they're valued by somebody - I'm willing to hold them.
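+
+The demo's send button is doing what the earlier Colton payment did: discovering a route (here USD -> XLM -> EUR) and submitting a strict-receive path payment through it. A sketch against Horizon's pathfinding endpoint, reusing the placeholders above:
+
+```js
+// Ask Horizon for paths that can deliver exactly 10 EUR tokens,
+// starting from the USD tokens our wallet holds.
+const pathResults = await server.strictReceivePaths([usd], eur, "10").call();
+const best = pathResults.records[0];
+// best.path lists the intermediate hops (the demo's route is XLM);
+// best.source_amount is what the route currently costs in USD tokens.
+
+StellarSdk.Operation.pathPaymentStrictReceive({
+  sendAsset: usd,
+  sendMax: best.source_amount, // optionally pad this for slippage
+  destination: "G...FRIEND",   // placeholder address
+  destAsset: eur,
+  destAmount: "10",
+  path: best.path.map((a) =>
+    a.asset_type === "native"
+      ? StellarSdk.Asset.native()
+      : new StellarSdk.Asset(a.asset_code, a.asset_issuer)
+  ),
+});
+```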
+
+And so wouldn't it be interesting if an application took advantage of the fact that it's not the assets that are valuable, it's the value those assets represent - and let us begin to transact in that value, where liquidity is created through a volume of users rather than a volume of assets? I guess there still is a volume of assets, but there's no longer this localized limitation of "I can only use this asset as far as my feet can walk or my car can drive." Now I'm willing to hold an asset so long as it's valued by somebody, somewhere - which is quite interesting. Okay, obviously there's a lot more we could talk about, but we'll leave it there for now. I'll answer any questions people have,
+
+[18:00] and then, as you begin to develop or have other questions, I'm on Keybase and Twitter, so ask questions there. I'm around a lot - it's my job to answer questions and help you architect and implement these things - so feel free to reach out. Let's see, we've got one question here: the atomic nature of path payments, and what that means. "Atomic" really means that something happens all at once - there's no manual or time-based process of "okay, now we do this, and then ten seconds later we do this, and then this." It all happens in one package: all of the pieces - in this case a payment moving through assets, through different orderbooks - happen within a single transaction. So when you look at that path payment I sent to Colton, it consumed a lot of different orderbook offers for different assets, and as that value moves from
+
+[19:00] XLM to US dollars, from US dollars to Nigerian naira, from naira to euro tokens - or whatever assets the path runs through - it's doing all of that within a single operation. Within Stellar you've got this concept of ledgers: every five seconds a new ledger closes, and this whole payment happens within a single ledger. That's what makes the amount the last leg of the journey receives - the final asset the recipient gets - relatively stable: it's all happening within a single ledger rather than being spread out over time. Compare that to looking at Coinbase and saying, "okay, a Bitcoin token is worth X and I've got a US dollar token, so if I move it through Ethereum, then through some stablecoin, and over to USDC,
+
+[20:00] so long as I can do that quickly, there's a good chance I can make an arbitrage-style profit" - a non-atomic payment flow. Within Stellar, because all of that chaining together happens at one time, in a single ledger, for that one payment, you have a much greater assurance of the final value the person will receive - which is fantastic. And it also happens very quickly: you don't have to manually go in, you don't have a bunch of different APIs connecting to different exchanges, watching the books and having to cancel or revert something. It all just happens. And, as you noticed with the failure: when I initially looked at the UI in Keybase, a certain path existed.
+
+But then, when I went to actually make that payment, it failed, because one of the offers it had looked up was no longer valid - the ledger had consumed it during the twenty-or-so seconds I spent looking at it before making the payment,
+
+[21:00] and so it failed. That atomic nature also provides a level of safety, which is quite good. "Can people all over the world use it now?" Absolutely, yes - anywhere there are assets. Again, you can't just arbitrarily pick some token: the token has to exist, and there has to be some level of trading. You saw that payment fail, so at times there will be books that aren't liquid enough, where the path isn't good or isn't strong and you'd lose too much value. If you go to the guides listed here, there are the strict variants - strict send and strict receive - which basically say: the receiver has to receive exactly X amount, or, for strict send, I'm only willing to send exactly X amount, regardless of what's received. So there are safeguards, and if those safeguards don't clear, the transaction fails, which is good. But yes, anybody can use it. It's just a matter of: for the assets I want to send, is the liquidity there? Are there
+
+[22:00] orders on books somewhere for the path to go through? Which, again, is one of the reasons I think this becomes very interesting when users are also liquidity providers - when they're willing to hold assets and put them on orderbooks because they recognize there's an opportunity to make profit off the spread. That's going to be a service-level thing - if a wallet allowed you to do that kind of thing, it might be an interesting opportunity, I don't know, but it would be quite interesting to explore that possibility. But anybody in the world can use it, because it's the decentralized exchange: anywhere you can connect to Stellar, you can begin sending these path payments, making market orders for yourself, et cetera.
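+
+The strict-send safeguard mentioned above is the mirror image of the earlier strict-receive snippet: fix what you spend, set a floor on what must arrive, and the whole transaction fails and rolls back if the floor isn't met. A sketch, with placeholder assets and addresses:
+
+```js
+// Spend exactly 100 CAD tokens; fail atomically unless the path
+// delivers at least 70 USD tokens to the merchant.
+const cad = new StellarSdk.Asset("CAD", "G...CAD_ISSUER"); // placeholder issuer
+
+StellarSdk.Operation.pathPaymentStrictSend({
+  sendAsset: cad,
+  sendAmount: "100",           // exactly what we're willing to spend
+  destination: "G...MERCHANT", // placeholder merchant address
+  destAsset: usd,
+  destMin: "70",               // the safeguard: less than this and it fails
+  path: [],                    // or hops discovered via server.strictSendPaths
+});
+```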
+
+"Why does the Stellar DEX have lower volume than traditional exchanges?" That's going to be a first-to-market type of thing, as well as the kinds of assets held there. Again, when we're talking about liquidity, nobody wants their payment to fail, and until
+
+[23:00] there's liquidity, it's kind of a chicken-and-egg problem. Users don't want to use an exchange where either the prices are bad or the assets they want don't exist, but businesses don't want to build an enterprise on top of something where there are no users. So you need both of those things to happen at the same time - which, again, is something I think is really interesting: is there a way to have both exist at once, where users are liquidity providers, so it doesn't have to be a chicken-and-egg problem? The chicken is the egg... wait, the analogy breaks down, but hopefully you get the point. And the Stellar DEX is becoming much more active - maybe only on select markets, but it is becoming much more active. I think a big difference
+
+[24:00] is what the distributed exchange on Stellar is built for: it's built for payments and sending assets across borders, and that's not true of a lot of exchanges, which may be more for investments, or just "I want to move or store value" - it may be speculative. And a lot of it is first-to-market: Coinbase is huge for stuff like that, and Binance and others, just by being first to market. So it takes time to build up, but so long as you have liquidity, users, and applications being built there, it definitely happens, and it happens very quickly. And a big advantage of the Stellar exchange is that it's decentralized: there's not one entity that owns or controls it, which is why you can begin using it right now, today - you don't have to go through a KYC process to start using the exchange, which is pretty great. "Could we do a
+
+[25:00] course on how to become a market maker?" Absolutely. I would not be the person to give that talk, but we definitely have folks internally who could. It's one of those really interesting problems that I think we're only beginning to understand and explore - the capabilities and possibilities. What does liquidity mean? What is a market maker - not just traditionally, but what could a market maker be? What do we ultimately need for this thing to work? That would be a really interesting talk, one I would love to sit in on myself. "Would businesses use the same system, or is it a very different platform for businesses?" It would be the same platform. Again, we provide the software, and then users and enterprises or businesses decide how to implement it. The way you implement it might be at a service layer, an API layer - you may build a company around the way path payments work. I think, especially in wallet scenarios
+
+[26:00] - Venmo- or Cash App-type things built on crypto - it'll be really interesting to explore how path payments can work. Just the ability to send cross-border payments through an app would be amazing. But there is an aspect of this where users need that to be a thing that exists, and businesses need to build services that utilize it. And then you also need anchor providers - the on/off ramps - where people can actually move their US dollar tokens and euro tokens from the Stellar network to their bank so they can buy groceries; or there need to be implementations at grocery stores willing to accept those tokens themselves as forms of payment, so you don't necessarily have to move back into traditional financial architecture. A lot of that is still exploratory, and I think, rather than assuming we want to build something first for the entire world, look at the problems around you - problems you see in the world in specific areas - and build to solve those, almost using
+
+[27:00] that as a test case: how can this be used to improve the lives of a million people? And then try to solve for a billion people. Build something small first, then explore beyond it. But yeah, the functionality of path payments is incredible, both from a user perspective and from a business perspective - being able to make money off markets as liquidity providers.
+
+You also now have business access to everybody in the world, rather than just the West or just where you happen to exist - you can expand where you can do business, which is quite cool. All right, well, that looks like all the questions I'm seeing, so I really appreciate you all coming out. Again, if you have further questions or need clarification on anything, feel free to reach out to me on Keybase or Twitter - my username is right there, tyvdh - and I'd love to answer any questions. Thanks so much. Stay
+
+[28:00] tuned - we do this every two weeks. This is the first one, but in two weeks, on Friday the 24th, there's going to be another talk - I think Colton is doing it - on the usability of Stellar applications: another very important problem to solve, and an area where Stellar has a lot to offer. So be sure to tune in in two weeks. Okey-dokey - thank you so much for tuning in, and we will see you later.
+
diff --git a/meetings/2020-04-16.mdx b/meetings/2020-04-16.mdx new file mode 100644 index 0000000000..cd85c7f5ab --- /dev/null +++ b/meetings/2020-04-16.mdx @@ -0,0 +1,163 @@ +--- +title: "Creating a Stellar Ecosystem Standard for Send / Receive Transactions" +description: "This overview highlights anchor services and network fees and resource limits." +authors: + - andy-wermke + - anthony-barker + - erasmus-hagen + - gbubemi-agbeyegbe + - lisa-nestor + - michael-feldstein +tags: + - community + - SEP-10 + - SEP-12 + - SEP-16 + - SEP-6 + - SEP-24 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Lisa Nestor moderated the first virtual roundtable focused on defining a new “send/receive” SEP that standardizes how anchors transfer funds to each other on behalf of end users. She opened by explaining how Stellar Ecosystem Proposals evolve from community discussion into GitHub drafts, and why this effort builds on existing standards like SEP-6, SEP-10, and SEP-24 rather than reinventing anchor interoperability from scratch. + +The discussion quickly moved from theory into real-world remittance workflows. Participants from Tempo, SatoshiPay, Cowrie, and SDF examined how KYC data should flow, which responsibilities belong inside or outside the SEP, and how edge cases like failed bank deposits or cash pickup name mismatches should be handled. The group emphasized keeping the protocol narrow and composable, while leaving space for bilateral agreements, compliance-as-a-service providers, and future extensions. + +### Key Topics + +- Scope of a send/receive SEP: why anchor-to-anchor payments need a dedicated endpoint, and which responsibilities (onboarding, business agreements) intentionally live outside the protocol. +- Relationship to existing SEPs: reusing SEP-6/24 request shapes, SEP-10 for anchor authentication, and debating whether SEP-12 adds value for this flow. +- KYC handling: when sending anchors can reuse stored customer data, when receiving anchors dictate requirements, and how third-party compliance providers might integrate. +- Transaction lifecycle gaps: need for clearer intermediate statuses (pending, info-needed), update flows for fixable errors, and better handling of cash rename scenarios. +- Refunds and failures: why full refunds are costly, and how update-in-place mechanisms could improve user experience for downstream bank rejections. +- Jurisdictional complexity: differing regulatory requirements across regions, and why high-touch compliance cases cannot always be fully codified. +- Ecosystem trust model: anchors are independent, regulated businesses, with reputation and bilateral trust—not SDF—governing who participates. + +
+ Video Transcript
+
+[01:00] We're live. Okay, I'm going to give this just another couple of seconds to let everybody get settled, and then we will get started. So thanks, everyone, for joining us. Okay, I think I'm going to go ahead and get started here. All right: hello everyone, thanks for joining, wherever you are in the world. My name is Lisa Nestor, I work on the ecosystem team at the Stellar Development Foundation, and today I will be your
+
+[02:00] moderator for our first-ever virtual roundtable, where the focus of our discussion will be a new SEP proposal for what we call send/receive transactions. We'll dive into the details of the send/receive proposal shortly; first, a quick outline of the agenda. I will begin by providing a quick introduction to what a Stellar SEP is and how it fits into the Stellar ecosystem. We'll then move to the send/receive discussion, providing a short overview of the purpose of this SEP as well as the decisions that have been made to date, and then we will open it up to our roundtable participants to start discussing and sorting through pending details. At the end, we will also try to leave some time for Q&A
+
+[03:00] from anybody streaming in and participating in this discussion as an audience member - so please feel free to add any questions in the YouTube chat box, as you'd like. To dive into things: what is a SEP? A SEP is a Stellar-native term that stands for Stellar Ecosystem Proposal. SEPs are publicly created, open-source documents that live in a GitHub repository and facilitate the creation and eventual adoption of technical standards within the Stellar ecosystem. These are standards above the core protocol layer - ecosystem-level proposals and standards. To provide a short overview: SEPs
+
+[04:00] are a dynamic way of introducing new standards for the ecosystem above the Stellar network. They help develop use cases that are being implemented and tested by companies and individuals across the Stellar ecosystem, and as new ideas or new use cases are developed, they can be introduced in the form of a SEP to start creating technical standards and adoption across the ecosystem. It's worth noting that much of the SEP process has been inspired by the IETF. To give a quick view of the SEP process, for anyone interested in doing this themselves or getting more involved in other SEPs being introduced: it really starts with an introduction, or an initial
+
+[05:00] discussion. There's a use case - a type of transaction or operation that someone wants to do in the Stellar ecosystem - and there's not really a standard, easy way to implement it. So they reach out and start talking with other people in the ecosystem, including SDF, to get a sense of whether other people have a similar desire or see a similar need, and also to get feedback on iterations of how that use case might work. After those pre-SEP discussions happen, an actual SEP is drafted - this usually comes in the form of a pull request to the GitHub repository.
+
+At that point there's a lot of continued discussion, further iteration, and merging of the proposal itself, which then actually gets turned into a
+
+[06:00] formal draft. Once that draft is developed, it goes into an awaiting-decision stage, where it's tested and implemented, and there's a period for final comments to be submitted by other people in the ecosystem who are working with the SEP at a very early stage. Once that Final Comment Period is closed, the SEP officially becomes active. You can still think of this as an early period, where the SEP is finalized but there's still testing and early-stage implementation of it. Then, once we feel a SEP is really robust, we move it to a final phase, where it becomes a full standard within the ecosystem. Just for reference: the proposal we're discussing today is still in that merging-and-further-iteration stage - a proposal has been drafted, and we're now reviewing it.
+
+[07:00] I think, to look at the big picture: SEPs are really a tool to ensure that we're facilitating an open financial network. They create standard, uniform ways for different types of assets or applications to interface with the Stellar ecosystem, and underlying that is the ability to create interoperability across various applications, wallets, and assets on Stellar. So they're a critical part of facilitating an open financial network. A quick example is SEP-24: deposit and withdrawal of anchored assets in wallets in the Stellar ecosystem. It's a very active SEP, and it has really provided a way to improve user experiences across wallets - for users to be able to deposit
+
+[08:00] and withdraw balances across many different anchored assets - and it also ensures asset compatibility: as an anchor, or somebody issuing an asset, I can make sure that if I implement SEP-24, any other SEP-24 wallet or application in the Stellar ecosystem will be compatible with my asset. Very valuable in that sense. Moving into the actual roundtable, I'll quickly introduce our participants. We have three companies from the Stellar ecosystem speaking during this roundtable: we have Anthony from Tempo Money Transfer, calling in from France; we have Erasmus and Andy from SatoshiPay, calling in from Germany; and we have Gbubemi from Cowrie, calling in from Abuja - sorry, yes, from Lagos,
+
+[09:00] I apologize for that. And finally we have Michael, one of our lead integration engineers at SDF, who will take the lead and help facilitate this discussion for the participants. So, to transition: we can start by providing a quick overview of what this send/receive SEP is focused on doing and what's been decided to date. With that, I will hand it over to Michael. - All right, cool, thank you, Lisa. So, what is this trying to achieve? We already have two SEPs for wallet-anchor interoperability, but that's a unilateral thing: it's one person trying to withdraw into their own real-world bank. The point of this SEP is to facilitate payments between two different people. So, as far as the actual end users are concerned, it'll
+
+[10:00] just look like "I'm sending money into the other person's bank account."
+
+This is going to be facilitated, usually, by two anchors that actually have relationships with each other - and ideally there will be a dense network of anchors in all different regions, so you could send to anywhere in the world. But the important part is that it is between two different end-user individuals. So our use case is, for example, remittances from Nigeria to Europe - and this is something that Cowrie and Tempo have already started working on, using some of our older SEPs to make it work. Ideally we're going to have a clearer, explicit SEP for this, so that if a third anchor comes along, they can set up a business relationship and just plug in, not having to do technical integrations or
+
+[11:00] anything complicated - it's already going to be built. So we've made some decisions already - nothing final; I'm just trying to put something out there so we can talk about it. It looks a lot like SEP-6, mostly because what we've seen from other anchors - what we've seen from Tempo and Cowrie - is that SEP-6 technically seems to work for a lot of this: the shape of the API, what it offers, really seems to work. So I didn't want to reinvent the wheel and make everybody jump through hoops. What I did - and you can see this in the draft PR - is basically take something that looks like SEP-6, create a separate endpoint for it, and clean up a few things. For example, we took out the usage of SEP-12. SEP-12 is a customer info server - an endpoint just for
+
+[12:00] sharing KYC information between two anchors. Originally we used it, but we thought it just makes the flow more complicated: it's easier to just send the customer info with the first call anyway. So I'm going to walk through it - do you mind if I share my screen, Lisa? Okay, I'm going to do it. Has anybody here had some experience with SEP-12? Because, to be honest, I had not seen it come up in many conversations previously. It's something
+
+[13:00] I'm curious about - that's one of the things I want to ask: do people feel SEP-12 is necessary? Is it useful and helpful, or does it just redirect some usage and complicate the story? - "Yes, we've implemented it. Personally, I think it's useful. It goes back to that original goal of a user experience where you can do KYC once, have it stored in the wallet, and then, for any new anchor going forward, be sure that you have already done the KYC. So I think it's useful in that respect: it really helps the user experience that you can do the KYC questions once and then have your wallet manage the KYC, so with any anchor who comes online afterwards, the user is
+
+[14:00] sure they can proceed." - "But wouldn't you still, when a new anchor comes on board, have to redo that KYC with the new anchor?" - "Yes, but the idea - or my concept - is that you only actually store the KYC information once, and when a new anchor comes on board, you can share that already-stored KYC information with the new anchor via this SEP." - Well, does that make it easier? Actually, let's come back to this. I'm going to walk through how it works so we can all be on the same page - and so the audience is aware of what KYC means.
+
+[15:00] So KYC is essentially "know your customer": this is information about the people trying to send or receive money. It can be as simple as a name, or it can be as complicated as "we need a photo of your passport, plus a photo of your face holding your passport" - that kind of thing. It can get complicated at times, and some markets have stricter requirements. For businesses there's also know-your-business, and then we really start opening Pandora's box - so it is a deep topic. Can you guys see the SEP now? I'm confused as to what's happening with my screen. Yes?
+
+[16:00] Okay, so I'm just going to walk through the SEP as it currently stands. At the highest level, this is something that creates a payment between two end users, facilitated by two businesses that have rails between each other. One of the differences between this and something like SEP-6 or SEP-24 is that this is not a discoverable protocol: any anchors involved in this, any rails created - they have a relationship up front. So it's not like someone can just pop up and say "now I'm an anchor in Saudi Arabia" and get money sent to them without someone agreeing that they're actually a real business. In this example, we have Alice in Nigeria, who wants to send money to Bob in Europe. So Alice will sign up
+
+[17:00] with NigeriaPay to make this payment. Bob doesn't need to know anything about any of this - all he needs to do is make sure Alice knows his bank account information so she can actually have it deposited. Oh - looks like the diagram used the word Cowrie instead of NigeriaPay; I'll change that. But basically, this Nigerian anchor has a relationship with an anchor in Europe - EuroPay - and they will use this protocol to actually transfer the money and tell EuroPay where to deposit the money in Bob's bank account. So the actual protocol only happens between the two anchors. Everything that happens outside of the two anchors communicating is not part of the SEP - we don't need any protocols for that. So when NigeriaPay onboards Alice and collects all of Alice's information, that's just a web app or a mobile app; there's no interoperable protocol there.
+
+[18:00] We don't want to try to define too much - only the things that involve separate parties interacting with each other. - "Michael, a quick question - I'm looking at the feedback on GitHub. Am I on your screen? I'm looking at the markdown on GitHub." - "Wait a second - are you seeing what I'm highlighting right now?" - "Yes. Okay - sorry, I was looking at the wrong screen; I had a second screen up." - So, this is what I just described. There are four entities: the sending client and the receiving client - these are the actual people or businesses sending money and receiving
+
+[19:00] money - and then the sending and receiving anchors. The sending client signs up with the sending anchor and tells them they want to send money to Europe; the sending anchor decides who their anchor in Europe is and sends the tokens over; and then the receiving anchor deposits into the receiving client's bank.
+
+When these rails are set up, each anchor will find a counterparty - whoever they want to interoperate with - and they'll trade public keys so that we can use SEP-10. As in SEP-6 and SEP-24, we use SEP-10 for authentication, except here it's the anchors proving who they are in order to interact with each other - this is not a SEP-10 flow for the end user. All of the onboarding,
+
+[20:00] and collecting all of the information for the transaction, happens outside the scope of the spec. So, first, the sending anchor will collect the amount, any KYC information about the sender or the receiver that they need, and the bank account information - where it's going - and then the actual initiation of the protocol happens. - "I just got a message from someone asking if you could zoom in a little bit; it's hard to read." - "We're also going to link to this repo in the YouTube chat so people can follow along directly."
+
+[21:00] So this is where things start to look a little familiar. Now that the sending client has told the sending anchor what it wants to do, we can start the protocol. Basically, the sending client chooses their destination region - they say, "I want to send this to Europe." And now the sending anchor says: "okay, I know who my Europe anchor is. I'm going to look up their stellar.toml file to get their send/receive API endpoint, and I'm going to look up their info endpoint to see what fields they need." This is very much like the SEP-6 and SEP-24 info endpoints: it'll say, "I can do a direct send for euros, and I need these fields - I need bank account, I need routing number, I need first and last name of the receiver." Using that information, I, as the sending anchor, can make sure I've collected all of that from my sending client, and if I
+
+[22:00] didn't, I can collect whatever is missing. Once I've collected all of that information, I make a POST request to this send endpoint - very much like the POST deposit and POST withdraw of SEP-6. And this is where we were talking about SEP-12: instead of using SEP-12 to send the customer info separately, we're looking at sending it all in one request. It's a single atomic request that says, "here is what I want to send, and here is all the customer information." And if I'm missing something, rather than the receiving anchor telling me "oh, you need to SEP-12 me some other info," they just say "I also need the sender's last name," and I try the whole thing again with the extra
+
+[23:00] information. So the receiving anchor will simply reject this send call until it contains everything needed to do the transaction. And once that's ready - once the sending anchor has sent everything to the receiving anchor - we get back all the payment information. Now, assuming the sending anchor has collected the fiat from the sending client, however that's going to happen, it can make a path payment to send the receiving anchor whatever it expects: if the receiving anchor is expecting to deposit euros, the sending anchor should send it euros, either via path payment or via a regular payment where they've exchanged the asset however they want.
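+
+A sketch of that "single atomic request, retried until complete" idea, as described here. This is a hypothetical rendering of the draft: the URL, endpoint name, and field names are illustrative (the proposal under discussion later evolved into SEP-31, with different details), `AUTH_JWT` stands in for a token from the anchor-to-anchor SEP-10 handshake, and a runtime with `fetch` (Node 18+ or a browser) is assumed.
+
+```js
+// Sending anchor -> receiving anchor: one call carrying the transaction
+// plus all customer info. On rejection, collect the named fields and retry.
+const AUTH_JWT = "..."; // placeholder: SEP-10 token between the two anchors
+
+async function send(fields) {
+  const resp = await fetch("https://receiving-anchor.example/send", {
+    method: "POST",
+    headers: {
+      Authorization: `Bearer ${AUTH_JWT}`,
+      "Content-Type": "application/json",
+    },
+    body: JSON.stringify({ amount: "500", asset_code: "EUR", fields }),
+  });
+  return resp.json();
+}
+
+let result = await send({
+  sender: { first_name: "Alice" },
+  receiver: { first_name: "Bob", last_name: "B." },
+  transaction: { receiver_bank_account: "...", receiver_routing_number: "..." },
+});
+
+if (result.error && result.missing_fields) {
+  // e.g. ["sender.last_name"]: collect it from the sending client,
+  // merge it in, and retry the whole call.
+  result = await send({
+    sender: { first_name: "Alice", last_name: "A." },
+    receiver: { first_name: "Bob", last_name: "B." },
+    transaction: { receiver_bank_account: "...", receiver_routing_number: "..." },
+  });
+}
+```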
+
+But as long as the receiving anchor receives what it expects, the SEP part is basically done. The receiving anchor should use the bank account
+
+[24:00] information that was sent with the send call, do the deposit, and from then on there's just the standard transaction endpoint, where the sending anchor can check on the status to make sure everything is going correctly. So, yeah, that's the general flow. A few things I want to ask about: (a) the SEP-12 stuff; and (b) the idea of refunds, which is an open question. It should be something very rare, but in the case where the receiving anchor receives the tokens and then can't actually deposit into the receiving bank account, we need to know what happens. Should there be a part of the SEP where we actually describe this, or is it something that happens outside of the spec - where, since it's a very rare thing and you have relationships with
+
+[25:00] each other, you just communicate with each other directly? Before I go any further, does anyone have any comments or questions on what I just talked about? Sorry if that was a lot, and sorry for the confusion earlier with the screens. So let me ask about this SEP-12 stuff, then. If the value of SEP-12 is knowing that I can store the customer info and send it to whoever else needs it, isn't that still the case here? The sending anchor collects the KYC information for whoever it is; they can store it forever, because the sending client has created an account with them; and they send it over via this send endpoint. Doesn't that functionally give
+
+[26:00] you the same thing as using SEP-12? - "I think we need to make some distinctions. SEP-12 is useful for an onboarding process, a registration process. This particular call is not for onboarding - it's an actual transaction, meaning it can be called many times. In terms of just getting the required information, it makes sense the way you describe it: by the time the call is made, all the information should already be complete, because this is an actual transaction, and if there's anything missing, the receiving anchor can immediately reply saying 'these are the missing fields,' and the sending anchor can retry the transaction. That makes sense in the context of actually sending transactions,
+
+[27:00] whereas SEP-12 is really for onboarding, and the main difference is that onboarding requires identity documents. This particular endpoint doesn't necessarily include identity documents - it's more like personal information: the receiver's name, the receiver's bank information, but not identity documents. So this endpoint is useful in the context of a transaction, which is repeatable, while SEP-12 is useful for an onboarding process, where identity documents are essential." - So one of the things we ran into early on is the question of what is part of the SEP and what is not. SEP-12 is really useful where you're onboarding to a service
+
+[28:00] from a product that's not run by that service. It was really good with SEP-6: if you have a third-party wallet that wants to onboard with an exchange, SEP-12 is really important, because it lets the exchange collect information through a wallet where the exchange does not control the UI. In this case, Cowrie controls the UI for onboarding.
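+
+For reference, SEP-12 as Michael describes it is a small surface: roughly one endpoint to upload customer fields and one to delete them. A hedged sketch - host, token, and field choices are placeholders; see the SEP-12 spec itself for the authoritative shape:
+
+```js
+// Upload customer info to an anchor's SEP-12 KYC server.
+await fetch("https://anchor.example/kyc/customer", {
+  method: "PUT",
+  headers: { Authorization: `Bearer ${AUTH_JWT}` }, // SEP-10 token, as above
+  body: new URLSearchParams({
+    account: "G...CUSTOMER_ACCOUNT", // placeholder Stellar account
+    first_name: "Alice",
+    last_name: "A.",
+    // ...up to passport photos, depending on what the anchor requires
+  }),
+});
+
+// And the "forget me" side: delete everything stored for this account.
+await fetch("https://anchor.example/kyc/customer/G...CUSTOMER_ACCOUNT", {
+  method: "DELETE",
+  headers: { Authorization: `Bearer ${AUTH_JWT}` },
+});
+```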
+
+So you can use SEP-12 if you want, but you don't have to: since the party collecting the information and the party displaying the UI are the same product, there's not really a need for an interoperable protocol. I think that's the key distinction: if there aren't two parties interoperating, there's really no need to define a SEP.
+
+[29:00] - "Michael, could I ask you for a little bit more definitional info on SEP-12? We've referenced it a lot." - Yeah. SEP-12 is a customer information transmission protocol. Basically, it's an endpoint where you can post customer information. Let me start over with the use case: I'm an exchange, and there's a wallet that wants to do deposits into my exchange for a user. I need to collect customer information from them to do my KYC - first name, last name, a photo of a passport, whatever informational fields. What this SEP allows is for me to say, "here are the fields that I need, and here is
+
+[30:00] the endpoint you can post them to," and then the wallet - which doesn't know anything about the exchange other than "this is where I post the customer info" - can send the information to the exchange. It's a generally simple protocol - a lot of these SEPs are very simple. It's just one endpoint - well, actually two endpoints: it also allows you to delete information. It's essentially just a standard that says: if you're going to collect customer info, and you want other parties to be able to provide it to you, here's a standard way to do it - with no UI; the UI is up to the product itself, we don't say anything about it. - "Can I ask - I don't have a lot of experience, as I mentioned, with this approach, but
+
+[31:00] there's no way of fetching the raw data with this, correct?" - "What do you mean?" - "Perhaps I'm an anchor or some entity, and I'm being told that somebody has been KYC'ed. But for whatever reasons - regulatory compliance - I may need to run my own KYC again, so I would like to fetch the raw data. If we have some sort of agreement between these two entities to share the data - in a compliant manner, of course - then I would also want to fetch that raw data: the identity documents, the identity of the natural person, all the documents they have uploaded." - So, the way we imagine that happening now: remember, the first step of the actual protocol here is this info endpoint, which I think a lot of people are familiar with from SEP-6 and SEP-24. This is basically
+
+[32:00] how an anchor advertises what it supports. So here's an anchor that says, "I support receiving US dollars; here's my fee information" - and what you're asking about are these fields. This says: if you want to do a transaction with me, I'm going to need the sender's first and last name, and the receiver's first name, last name, and email - and these can be any number of KYC fields, including things like a passport photo - plus the information regarding the transaction. This is another way we diverged from SEP-6 a bit: we separated sender, receiver, and transaction information, whereas in SEP-6 it's all just "fields," which works there because there's only one party. With this, there are two parties:
+
+[33:00] there's a sender and a receiver.
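+
+A hypothetical sketch of that sender/receiver/transaction field grouping in the draft's info response, as described in the discussion - the names and structure are illustrative, not the final spec:
+
+```js
+// What a receiving anchor's info endpoint might advertise: which assets
+// it can receive, and which fields - grouped by sender, receiver, and
+// transaction - the send call must include.
+const exampleInfoResponse = {
+  receive: {
+    EUR: {
+      enabled: true,
+      fields: {
+        sender: {
+          first_name: { description: "sender's first name" },
+          last_name: { description: "sender's last name" },
+        },
+        receiver: {
+          first_name: { description: "receiver's first name" },
+          last_name: { description: "receiver's last name" },
+          email_address: { description: "receiver's email" },
+        },
+        transaction: {
+          receiver_bank_account: { description: "destination bank account" },
+          receiver_routing_number: { description: "destination routing number" },
+        },
+      },
+    },
+  },
+};
+```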
+
+So, first off, there are actually two ways. One is this way: the info endpoint says, "here are the fields I need - if you want to interact with me, you're going to have to send me this data." The second way - (sorry, someone's echoing; if you're not talking, could you mute?) - the second way is if you have a transaction that's particularly high-value. You might say, "originally I said I only need first and last name, but I actually also need passport photos now," and that's where you would reject the first send call and say, "I need more information - I need this as well," and the sending anchor would have to collect that and send it to you too. Does that answer your question about how you would get that data from the sender?
+
+[34:00] - "Yeah - I mean, my question was more about SEP-12, why it wouldn't be used here, and also about having some suggestion so everybody would have an agreed standard for requesting that data. It might also be interesting if we looked at sharing compliance data across a network - but maybe that's a bit outside the scope of this conversation if we're mainly discussing this new direct payment proposal. It was more in the direction of a compliance partner network." - "So you're saying you want a way to have shared KYC ahead of time for everybody, rather than at transaction time?" - "Yes, for example. Or just separating concerns a little bit, so that SEP-12 - and I'm segueing into
+
+[35:00] a bit of a side discussion here - would allow for an entity that's focused entirely on doing compliance to provide an endpoint that everybody would know how to consume, and where they could request the data according to certain rules. But maybe that's not really relevant to this discussion, because it's more about SEP-12 itself - maybe we should pick it up separately." - "Can I say one thing, Michael? From a European perspective - because the regulations are changing slightly, and Germany was a leader in this - we basically need liveness tests on all new registrations. So the whole concept of SEP-12, of basically having the data and submitting it to a regulated entity in Europe, probably won't fly, simply because we need to do a
+
+[36:00] liveness check on the hologram on their ID as well as on their face. And in principle there's a sort of expiry on that: say somebody did it three months ago, and you request the video recording of that liveness test - somebody sitting in front of an agent and holding up their passport. There's no expiry on the video, but when the passport expires, we have to request a new passport. The issue is that we, as a regulated entity, have to do our own KYC; even if we're using a partner, we're still responsible - we can't delegate it 100%, we're still held accountable. So it could be good to have a third party - and in fact there are about three or four, maybe a
+
+[37:00] dozen, vendors out there who offer this kind of thing. But passing it between companies can work; you just need an agreement - a data protection agreement between the companies - like the one we signed with Bitbond, where they passed us the data they'd collected with a German KYC provider; they basically had an API into the provider."
+
+"So that would be something along those lines, and I'm just thinking that perhaps SEP-12 could also assist in this sort of compliance partner network. So yes, of course, it would have to be with a German agent in your case, for example - or, I guess, a European agent that would have to do the liveness check - so there would be very specific requirements for somebody in Europe compared to somebody in Africa, and so on. But if that independent compliance provider partner made sure to collect all
+
+[38:00] the data accordingly - 'this is compliant with the regulation in these countries, it's not compliant with those' - if there's a way of signifying that, it could still work. However, I appreciate this really is an ancillary discussion to the SEP." - It is related, and it's also a little bit beyond the scope, so it's tricky to know when to stop this conversation. I think the actual mechanics of it are outside the scope of the SEP, but it is important to know that this is a use case - the idea of having a shared KYC provider. It might not work for everybody, but I want to make sure it fits into the SEP if people do want to use it. So we had thought about this, and I think the SEP does provide for it. If you have a KYC network - somebody who provides KYC compliance for
+
+[39:00] you - the way I imagine it working is that every client, every end user, has a token that says "this is the user." You can go to the compliance provider and say, "here's the user, here's the token - can you provide the KYC for me and verify that they've actually been properly vetted?" And I think the way that would work is that in this info endpoint, instead of "first name" we would say "sender's KYC token." That would allow you to go back to your KYC provider and use that token to make sure everything is on the up-and-up and is what you expect. As far as actually building the KYC network: yes, SEP-12 might be a good way to facilitate that between each other, and I think that is a good use case, where you
+
+[40:00] actually don't have to do proprietary integrations with everybody. Andy, I'd ask you to mute when you're not talking - I think you've got some echo going on there. - "Oh, thank you." - I guess my main concern is: I just want to make sure that what the SEP provides doesn't disallow this. I don't want people to have to add another ad-hoc API to use this KYC-as-a-service stuff. - "Just so you know, this is something that we're pushing for very much. Our service sits very much in between different entities on the Stellar network, and one of the things we want to streamline is the compliance process. So we are very actively negotiating with compliance-as-a-service providers, and of course it is something
+
+[41:00] that we hope to achieve. So yes, we would ideally want something like that." - "And just to let you know, we're also doing something there: we have a partner who requested it, and we opened up an API where they can then pull the information. Andy, maybe that could be a SEP as well - a standardized way of pulling data once somebody has been validated. The issue that we ran into is that every country requires different documents."
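+
+The "KYC token" idea Michael floats above can be sketched as a variant of the info response: instead of asking for raw PII, the receiving anchor asks for a token it can redeem with a shared compliance provider. This is entirely hypothetical - nothing like it was standardized at the time, and every name and URL below is a placeholder:
+
+```js
+// Hypothetical: the receiving anchor advertises a token field in place
+// of raw KYC fields...
+const hypotheticalInfoResponse = {
+  receive: {
+    EUR: {
+      fields: {
+        sender: {
+          kyc_token: {
+            description: "token redeemable with the shared KYC provider",
+          },
+        },
+      },
+    },
+  },
+};
+
+// ...and, on receiving a send call, redeems the token instead of
+// handling identity documents itself.
+const senderKycToken = "..."; // supplied by the sending anchor
+const vetted = await fetch(
+  `https://kyc-provider.example/verify?token=${senderKycToken}`,
+  { headers: { Authorization: `Bearer ${AUTH_JWT}` } }
+).then((r) => r.json());
+if (!vetted.approved) {
+  // reject the send call, naming the additional fields required
+}
+```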
+
+"They require - like, we're working in Cyprus, and in France we don't require proof of income, but in Cyprus they do. So it all gets very complicated very quickly." - "And this is why we would really want a focused entity working on this, so that we don't all try to set up our own. At least from our side, this is what we
+
+[42:00] would like to see: an entity in the Stellar ecosystem dedicated to providing that service, who also works on standardizing it and can then go into the nitty-gritty of figuring out the requirements for each jurisdiction and the different purposes - businesses versus individuals, and so on." - "Right. And the only other comment I have is that if you look at the big players, like Remitly and Azimo, they typically do the KYC at the end - meaning they receive the payment first, and then they do the KYC after the payment is received. Currently we're not doing that: we do the KYC first, where, under SEP-24, we pop up the interactive URL. And this is going completely off-topic, sorry - with anchor-to-anchor traffic that's not an issue - but I'm just saying that, with the KYC thing,
+
+[43:00] following on Andy's concept of KYC-as-a-service, it might happen after the payment." - "Yeah, if you could share something like that, it would be great - it's really valuable for the ecosystem. I think if we had an open standard there in time - even if the requirements are sometimes very different between countries - it would still be valuable, in the sense that you don't need to start from scratch, you just need to add the data that's missing." - We hadn't heard from Gbubemi in a while, so I wanted to see if you have any comments on this. I also think this is very relevant to the discussion, and I'd suggest perhaps that we make some assumptions around how KYC is being collected, for
+
+[44:00] a kind of short-term model of how this anchor transaction would work - unless that seems too difficult to do - just so we can continue that discussion regardless of different jurisdictions requiring different KYC. - "I just wanted to ask: isn't it up to the receiving anchor? It's the receiving anchor who would always determine what the requirements are. So if you're in Cyprus but you are sending through Tempo, then it's what Tempo requires. In this particular situation you're sending from Cyprus, but the requirements would be Tempo's requirements - which means we don't need to think through every such situation. Yeah, I just wanted
+
+[45:00] to ask about the receiving anchor: who determines what is required?" - "Normally, we do the KYC on whoever is sending the money. In the case of you guys, we do the KYC on Cowrie and your bank; in the case of an individual client, we do it on the client. So it just depends on who the client is. In the case of you guys, you are our partner, so we do the KYC on you - and then, because you're working with a regulated entity within Nigeria, you're doing the KYC in Nigeria and passing us some additional details. We do a validation check - whether the person is on some terrorist list or something like that in France - but we're relying on you and your banking partner to do the KYC in
+
+[46:00] Nigeria on the sender. Now, if the amounts are a little bit higher - like with you guys - typically we ask for additional information. Not proof of income, but, you know: what's it for? For someone sending two hundred thousand euros or four hundred thousand dollars - an individual would normally be buying a house - we'll need a reason for the wire, and then maybe proof of the purchase of the house. Or, with a company, the company might need to send an invoice. And that's not us requiring it - it's often our downstream banks, or our internal compliance officer." - "Yeah, so my question is that, in this case, with Tempo, we have to follow Tempo's requirements - so it's not determined by
+
+[47:00] the requirements of my country. You, as the receiving anchor, are responsible for determining what KYC needs to be sent. This question of different jurisdictions - what I'm trying to get at is that the receiving anchor always wins: it's the receiving anchor who determines what KYC is required, so it's always the receiving anchor's jurisdiction; it doesn't matter what the sending anchor's jurisdiction requires for KYC." - "Okay. But in any event, I think this flow works for us. Michael, when I talked to you last, there were a couple of open questions: how would this work for a cash
+
+[48:00] transaction, like a cash-out? And the second one was a rename - like, if the receiver's name was wrong and they needed to change it, or the bank account was wrong." - Yeah. In the case where some of the information is wrong, I think what we do is just error it out - with an error that says "the bank account is not a valid bank account" or something. - "But what happens if it's after Tempo - a downstream thing? We sent it to our bank, and maybe the bank at the other end rejects it because it says the account number is invalid." - Okay, yeah - that was one of my big questions: how rare is that case, and is it something we want to make part of the spec? I could imagine a case where the
+
+[49:00] sending anchor might say, "if anything goes wrong, send the money back to this Stellar account," and we essentially reverse the whole thing. - "With bank accounts, I don't think we have a lot of errors. But we do with cash. If someone is sending a cash transaction to, say, Senegal or the Philippines, we often get - like I mentioned last time - around three to five percent of transactions where the sender puts in the wrong name. The person might have a middle name, might have three or four names, and the sender puts in two of them. So when the person goes to pick up the cash, their passport shows all five names, and they have to call us or contact us and do a rename." - Okay. So this is where you're acting as the sending
+
+[50:00] anchor, not as the receiving one, right? - "And then, as a receiving anchor: we do a little bit of cash-out - it's not our main business - but I was focusing more on Africa; we do a lot there." - Yeah, okay - that's a big percentage to just error out. I think just erroring it out is the - well, okay, no, sorry: you can't error it out, because it's already been sent. So what we want is kind of an update-payment endpoint, or something where someone can make changes after everything is said and done but before it's been paid out. You want to be able to update something.
Normally what would happen is they poll the transaction status. We'd have it marked as an error, and we'd have an error type- it'll be one of two things: it'll be in a pending + +[51:00] pickup or waiting-for-pickup status- this is cash still- and the person would do a rename and say, okay, instead of John Mark it's John Luke, and that renames the name. Okay- and so currently the solution that actually exists in the spec is to just issue a refund and do it again. Okay. That creates problems, because there are foreign exchange fees and values change- obviously it's definitely not ideal. So again, our customers would expect it to stay the same transaction- exactly. It would be a giant nightmare if instead you refunded. Right, and we do it that way today, yeah. + +[52:00] Okay, then I think that's something that's missing here. So we need to figure out a way for the receiving anchor to tell the sending anchor: something's wrong, here's what's wrong, and here's how you can tell us how to fix it- if it's a fixable thing, right. So what they would probably do is give the reference- the transaction reference, the operation ID, or some transaction reference that we feed back to them- and then send a transaction update call. Okay- and since the spec doesn't have a way to update transactions, only to create them, that's probably the missing piece. Okay. Ah, it's a little more complicated, because you have to tell the sending anchor something, and that's a different direction that doesn't really exist in + +[53:00] the spec so far. So okay, this is good information. I'm going to have to do some thinking on that and come up with a solution, but I agree that would make the product a lot better- this is kind of a big missing thing. Okay, that's fine. Couldn't this be resolved with the transaction endpoint? You would normally poll the transaction endpoint to find out the status of your transaction, so couldn't it be solved with that endpoint? Basically, when you first send the transaction, it would return something like pending payout, and then eventually- let's say you call it a couple of hours later, or tomorrow, and by that point, you know, there's something wrong- it would have an error description. Wouldn't that work? Yeah, I think so. The transaction object can + +[54:00] have a status, you know- info-needed or update-needed or something. We just had a comment on YouTube from Mariano, who asked: do you think an intermediate status could be added for all transactions, so that by default the status would be pending and then change? I wanted to share that comment and also give a five-minute warning. Cool- yeah, I think what you guys just said makes sense: have a needs-update status for transactions, and then also make it so you can do a PATCH, or an update call, on the transaction endpoint to actually change some of the information. I think that seems fine.
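To make that concrete, here is a minimal sketch of the polling-plus-update pattern the group converges on above. The endpoint shapes, status names, and the PATCH call are illustrative assumptions- at the time of this discussion the spec only supported creating transactions, not updating them:

```typescript
// Hypothetical client-side sketch: poll a SEP-6/24-style /transaction
// endpoint, and send corrected fields instead of refunding when the
// anchor reports a fixable problem. Not part of any published SEP.
interface AnchorTransaction {
  id: string;
  status: string; // e.g. "pending_pickup", "error", "needs_update" (illustrative)
  message?: string; // human-readable description of what is wrong
}

async function pollTransaction(
  transferServer: string,
  id: string,
  jwt: string, // token from a prior SEP-10 authentication
): Promise<AnchorTransaction> {
  const res = await fetch(`${transferServer}/transaction?id=${id}`, {
    headers: { Authorization: `Bearer ${jwt}` },
  });
  const { transaction } = await res.json();
  return transaction;
}

async function fixTransaction(
  transferServer: string,
  id: string,
  jwt: string,
  fields: Record<string, string>, // e.g. { receiver_first_name: "John Luke" }
): Promise<void> {
  // This update call is the "missing piece" discussed above; the PATCH
  // method and URL shape are assumptions for illustration.
  await fetch(`${transferServer}/transaction/${id}`, {
    method: "PATCH",
    headers: {
      Authorization: `Bearer ${jwt}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify(fields),
  });
}
```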
One of the last things I wanted to ask about while we're all here is the idea of receiver KYC. What I've been hearing mostly is that + +[55:00] there's not a big issue as far as needing to run KYC on the receiver. Could you guys share a little about what requirements you have for the person receiving the money, just really quickly? From a European perspective, for remittances it's fairly light KYC, because either we collect it directly from the recipients- like, if they come to a store, a physical outlet, to pick up cash, they'll provide their ID, so we don't need anything over the call because we're collecting it- or it's a bank account, a European bank account, and we don't need additional KYC information. However, if it was a merchant- and we've been looking into merchant acquiring- then we might need to KYC the beneficiary, because in that case the beneficiary would be a merchant. + +[56:00] But I think that's out of scope for this specification. When you're acting as the sender, do you ever need any KYC on the receiving client? When you're the sending anchor- occasionally we do, okay, but that's our compliance officer going to their compliance officer and typically asking for proof of ID. It happens, but it's a very rare event, okay. But it could come up on the transactions again- like I mentioned, if it's a very large amount, there might be additional proof of source of funds, or, you know, an invoice for a company or something else. + +[57:00] We handle that side of the spec- the receiver KYC doesn't come up much for us either. If it's going into a bank, then it's all handled by the bank, which has done all the bank KYC initially, so no, we don't at the moment. But if it's cash, then we collect the KYC from the actual person who's coming to pick up the cash, so we don't need to rely on anyone. Okay. Anthony, you're saying that sounds like, by necessity, a very high-touch thing- it's not something that can really be codified. For cash? No, for, like, a really high-value transaction where you need proof of funds or something- is that something that can become part of the spec, or is that something where + +[58:00] you really need a compliance officer doing it manually? For me, it would maybe be a next version of the spec. Again, each country is different, so it's a little bit hard to codify. Okay- pending statuses for the anchors, I guess: that would be handled with pending-sender and pending-receiver statuses if you're going to check on something that's more complicated, so that's fine. Yeah, it could be a pending-receiver status, and if there was some free text or something like this, it could be good- where you say proof of source of funds is required. Yeah, cool. I know our time is basically over, but I just wanted + +[59:00] to highlight that we didn't have the chance to talk about our use case yet- that is, when it involves a party that's not a traditional anchor in that sense, but another business entity. I don't know- do we still have time to quickly outline what different approaches or what features we would need in the SEP to pull off this special use case? Maybe I'm going to jump in here. Yes, I totally agree with you, Andy, and I think, you know, this just means we need to set up more of these discussions, more frequently, and definitely really focused on supporting the stuff in your use case as well. So I think for now we can't extend this specific session, but we can obviously continue offline, and perhaps we'll set up another one of + +[01:00:00] these in the near future. So thank you all for attending.
I also just wanted to address one kind of recurring question that we saw through our website and YouTube feeds, which was in regards to: how do you know the quality of an anchor- does Stellar or SDF somehow vet anchors as part of its network? And I just wanted to make the point clear that Stellar itself is an open financial network, a decentralized ledger. Anchors are enterprises and institutions with real-world reputations and, oftentimes, real-world licenses and regulations that they're following. So anchors are vetted by the applications or other anchors that are going to use their services. Within the Stellar ecosystem, we have a system- you know, the stellar.toml file- which helps anchors + +[01:01:00] to publish information about themselves on their website, including the business licenses they hold. But we, you know, certainly are working in an ecosystem on Stellar that is both open and reputational. So I just wanted to quickly touch on that and, again, thank everyone for joining. This conversation is certainly not over- perhaps it's even, you know, just getting started- but I think there were a lot of really great insights here, a lot of great suggestions, and so we'll follow up and continue to take things forward. Cool, thanks everyone. + +
diff --git a/meetings/2020-04-24.mdx b/meetings/2020-04-24.mdx new file mode 100644 index 0000000000..0f5df721e2 --- /dev/null +++ b/meetings/2020-04-24.mdx @@ -0,0 +1,76 @@ +--- +title: "Engineering Talks - Creating Usable Stellar Applications" +description: "This overview highlights creating usable Stellar applications." +authors: kolten-bergeron +tags: + - tutorial + - SEP-26 + - SEP-30 + - SEP-6 + - SEP-5 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Kolten led this Engineering Talks session with a focus on usability rather than protocol mechanics, framing the discussion around how developers can make Stellar applications approachable for people who are new to blockchain. He described the main obstacles users face—key management, on/off ramps, trustlines, anchors, and the DEX—and argued that most of this “hard stuff” should be abstracted away so users can focus on clear, understandable actions. + +The talk emphasized that Stellar Ecosystem Proposals (SEPs) are the primary tool for achieving this abstraction. By treating SEPs as an application-layer standard rather than core protocol changes, teams can build interoperable products that evolve over time while delivering simpler user experiences. Kolten illustrated how narrowing scope, simplifying interfaces, and educating users up front can dramatically improve adoption without sacrificing the power of the network. + +### Key Topics + +- Why usability is a major barrier to adoption, especially for developers and users who are new to blockchain. +- Treating SEPs as the glue between apps and the network, enabling interoperability without exposing protocol complexity. +- Federated addresses and SEP-5 mnemonic phrases as ways to reduce user anxiety around public and private keys. +- Account recovery via SEP-30 as a critical usability improvement for lost keys without handing control to third parties. +- Anchor standards (SEP-6, SEP-24, SEP-26) that let users move between the real world and Stellar without leaving their wallet. +- The progression from raw tools (Stellar Laboratory) to wallets, and finally to single-purpose apps with focused use cases. +- Examples of simplifying experiences through vertical apps like SatoshiPay, DStoq, and Vega. +- The importance of onboarding and education: showing value before sign-up, clear messaging, and localized translations. +- Encouraging developers to both follow existing standards and participate in creating new ones as the ecosystem evolves. + +
+ Video Transcript + +[01:00] Okay, I think we're live. What's up, everybody? Welcome to this week's edition of Stellar engineering talks. My name is Kolten, I'm the community manager here at the Stellar Development Foundation, and I am NOT an engineer, but I will pretend to be one for the next 30 minutes or so for the sake of conversation. This week I'll be talking about creating usable Stellar applications, and I also wanted to give a reminder that this is a series we're planning to do on a bi-weekly basis, so every other week we'll have an engineer from SDF come on and talk about whatever they're working on internally at the foundation. So before we get started, I want to give a little background on this talk and why we thought it might be a good idea to host it. Lately, within the ecosystem team, we've been thinking a lot about how we want to onboard developers into the ecosystem- whether that's developers who aren't native to the blockchain space, or developers who are just thinking about creating financial applications or whatever- and a lot of them, when they first start + +[02:00] working with Stellar, don't completely understand some of the usability implications or some of the technical aspects. So creating a resource like this, as a way for them to have something to reference and look back on to understand what they should be considering when building Stellar applications, is the basis for this talk. So let's see if I can change slides here. Yes- so, also before we get started, I wanted to spend some time defining "usable." I understand that usable means many different things to many different people. We have all these different applications within the Stellar ecosystem; they all serve different user bases, they all have different use cases, and so usability means a couple of different things across different services. So rather than think of this as a set-in-stone definition of what usability means, I'm more just trying to create a baseline for understanding what makes applications usable, or how to give your application a better chance of being usable within the Stellar + +[03:00] ecosystem. So we also need to understand why Stellar can be hard to use, especially for people who aren't super into the space. When they're encountering your application for the first time, what are some of the things they can have trouble with? Some of these things include going from the real world to the blockchain- right, we all remember that first time: how do I get my hands on this coin, or how do I use a stablecoin? Also, things like public and private keys are extremely difficult- they're not really something you're used to in your daily life. And there's this concept of decentralization- what does that even mean? Where is your app on the spectrum, etc. There's things like security: for the first time, a lot of users are responsible for their own security- maintaining their own private keys, making sure they send their funds to the right place, etc. And then you have things that are Stellar-specific, like the Stellar decentralized exchange, anchors, trustlines, lumens. So you can take all this stuff, bundle it up, and just + +[04:00] call it hard stuff, right? So how do we create applications that abstract away all this hard stuff and make use cases
easy to understand and easy to use for, you know, the average person who's not into the blockchain space? So I mentioned all these hard things- what can we do about it? The first thing is: create and use standards. Within the Stellar ecosystem, we have what are called Stellar Ecosystem Proposals, or SEPs, and essentially these SEPs provide a set of standards for developers to implement when building Stellar applications. These are things that evolve over time with the needs of the ecosystem- they evolve as new use cases pop up, and they evolve as technology improves. So SEPs are not something set in stone at the protocol level; they're things that are constantly improving for developers to use. And the way I like to think about SEPs is as a category of protocols that exist on top of the network. Oftentimes, when we're describing Stellar and some of its functionality, a lot of + +[05:00] people confuse that for functionality of the network, but really it's just a standard that we choose to follow to make that functionality possible. So think about SEPs as this thing that exists on top of the network, in between applications and Stellar itself. It is possible for applications to just directly interact with the network without implementing any of the SEPs, but the problem is that you might be able to use the network without being able to interoperate with other applications in the space- you might not be able to send to federated addresses, you might not be able to work with anchors, etc. So it's really important, if you're building an application in the space, that you mind the standards: try to follow them, pay attention to them, and try to implement them whenever you're thinking about ways to build your application and serve users in the best way possible. Oh, I went too far. So some examples of what you can do with SEPs, or what problems SEPs can solve, are + +[06:00] things like the federation protocol: a standard where, basically, we can map human-readable addresses to Stellar addresses. I know a lot of you probably remember the first time you sent any significant sum of money from your exchange account or your wallet: you're typing in a public key manually, and you're super scared that you might have forgotten one character or one letter or whatever, and if you mess up, you lose all your money. So what federated addresses do is turn these public keys into human-readable addresses, like on Keybase, where you have kolten*keybase.io or whatever. This is super easy to send to a friend, super easy to read if you're a user, and it basically takes the stress out of dealing with public keys. So whenever you're building applications, think about: okay, how can I make the stress of public and private keys easier to handle for my users? And the federation protocol is + +[07:00] one way to do that. Another way is something called SEP-5, which is a way of defining how we should derive key pairs. One of the things SEP-5 lays out is a way to represent secret seeds, or private keys, as mnemonic phrases. Mnemonic phrases are basically just a list of 12 to 24 words, I believe, that represent a private key.
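For readers who want to try the two ideas above, here is a small, illustrative sketch. It assumes the JavaScript stellar-sdk's `FederationServer` API (present in SDK versions of this era) and the community `stellar-hd-wallet` package for SEP-5 derivation- check current SDK docs before depending on either:

```typescript
// Illustrative only: resolve a federated address and derive keys from a
// SEP-5 mnemonic. The library APIs are assumptions based on this era's
// tooling, not something prescribed by the talk itself.
import { FederationServer } from "stellar-sdk";
import StellarHDWallet from "stellar-hd-wallet"; // community package

async function demo() {
  // Map a human-readable address to a raw account ID (federation).
  const record = await FederationServer.resolve("kolten*keybase.io");
  console.log(record.account_id); // G... public key

  // Represent a secret as words instead of a 56-character seed (SEP-5).
  const mnemonic = StellarHDWallet.generateMnemonic(); // 24 words by default
  const wallet = StellarHDWallet.fromMnemonic(mnemonic);
  console.log(wallet.getPublicKey(0)); // first derived account
}

demo();
```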
So if you're moving from one application to another, you don't have to copy and paste or write down this super long secret key that you might mess up; rather, you can just write down human-readable words. And the idea behind this is to have something users can resonate with- it's hard to write down a super long string of characters and hope you don't forget a number, because then you're screwed forever. So you want a way to make it easy to write down and easy to digest, so that if I mess up- let's say I misspelled "deposit" while moving my secret key from one + +[08:00] application to another- I can figure out that I misspelled "deposit," whereas I can't figure out that I forgot a character in my secret key. That's much harder. The other thing to mention is that this is something that's constantly evolving, and this is the benefit of doing things through SEPs rather than trying to change them at the core protocol level. There's also a SEP in the works called SEP-30, which is basically multi-party key management of Stellar accounts. It's a protocol that defines a way for an individual to regain access to a Stellar account if they lose their private key. So in the event that you lose your private key- which is a huge usability problem, not only in the Stellar ecosystem but in the blockchain ecosystem as a whole- you can find a way to recover that key without giving control of the key to a third party. Because if a third party has control of the key, that means they can steal your money if they wanted to. Hopefully they don't, but it's + +[09:00] possible. So coming up with different ways to make private and public keys easier to manage for users is an extremely important thing to think about when you're building your Stellar applications. Next, we have standards for anchors and clients to interoperate with each other- this problem of going from the real world to Stellar, or to any other blockchain for that matter. Right now the current flow is: you go to a Coinbase or a Binance or whatever, you create an account, you have to buy some coin, and then you send that coin over to your wallet, and then you can maybe transfer it to a different coin and send it to your friends, and then you have to go back to Coinbase and try to get it out. So there's this super convoluted process of going across different applications just to achieve a goal, and we've created standards in the Stellar ecosystem that try to solve that problem, such as SEP-6, SEP-24, and SEP-26. Basically, these allow you to go from the real world to your Stellar wallet without ever needing to + +[10:00] actually leave your Stellar wallet. You can see an example of this in practice- I think Lobstr, other wallets, StellarX, etc. all support it- but an example in Lobstr is that if I want to deposit USD, for example AnchorUSD, into my account, I just click deposit, I fill out some information, I click continue, and boom: AnchorUSD's interface loads up within my wallet. I never have to leave.
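The deposit flow described here is SEP-24's interactive flow. A minimal sketch of the kickoff request is below- the anchor domain is a placeholder, and the exact content type and response handling should be checked against the SEP-24 spec:

```typescript
// Sketch of starting a SEP-24 interactive deposit from inside a wallet.
// "transfer.example-anchor.com" is a placeholder; the JWT comes from a
// prior SEP-10 authentication with the anchor.
async function startInteractiveDeposit(
  jwt: string,
  assetCode: string,
  account: string,
) {
  const res = await fetch(
    "https://transfer.example-anchor.com/sep24/transactions/deposit/interactive",
    {
      method: "POST",
      headers: { Authorization: `Bearer ${jwt}` },
      body: new URLSearchParams({ asset_code: assetCode, account }),
    },
  );
  // The anchor responds with a URL for its hosted UI plus a transaction id.
  const { url, id } = await res.json();
  return { url, id }; // open `url` in a webview; poll `id` for status later
}
```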
And so the benefit of this is that you can all of a sudden interoperate with many anchor services, or on/off-ramp services, inside of your application without ever needing to force your users to leave. So basically, we can make our services on Stellar interoperate with multiple services, rather than having to form a partnership with just one- you can choose a whole bunch of anchors that you want to work with within your payment service. Outside of + +[11:00] standards, we can also simplify the experience- we can make stuff that's easy to use. Some of you who have been in the space for a long time might remember that back in the day, maybe the only way to send or receive stuff like Bitcoin was through the command line- and, of course, I don't think payment applications are going viral through the command line any time soon. So we have to make these things easy to use, right? First steps toward that are tools like Stellar Laboratory: you create a wrapper around the entire functionality of the network, and while Stellar Laboratory is a super useful tool, it's not something you would consider for mainstream use- it's not something that you want to give to users and be like, all right, cool, now you can send payments or make trades or whatever. So while this is a step in simplifying the interface, it's not quite there yet, and that's where things like crypto wallets come in. They take the complexity of the network and + +[12:00] put it in an easier-to-understand wrapper than, say, something like Stellar Laboratory. But even still, crypto wallets can be pretty daunting- the use case is not immediately obvious for those who don't appreciate the complexity of the network or completely understand what they're trying to get out of it. So presenting all of this information to users can help a little bit, but it might still be complicated for some. And what can we do about that? It seems like we're out of solutions here, but we're not: we can continue to simplify until we've simplified the use cases themselves. If you think about where we started: we simplified the interface for interacting with the network, and then we simplified the use cases for interacting with the network. An example of this is something like SatoshiPay: the only aspect of Stellar you're really exposed to when using SatoshiPay's micropayment service is sending transactions on the network and receiving transactions on the network. They don't expose you to a bunch of different + +[13:00] functionality- it's literally as simple as deposit some lumens, pay for some content, nothing crazy. Additionally, we see this explored through things like DStoq, where, instead of focusing on the payments aspect of Stellar, they focus on the decentralized exchange aspect. So now you can decide: okay, I need to buy and sell stocks; I just need to know the price, and I just need to say how much I want to buy or sell, and boom, I can do it right there in the interface. They take away all of the complexity of deciding what I want to do by presenting one single use case within the app. And another example of this is Vega.
So Vega is the dollar savings app that we're working on internally here at SDF, and essentially it just lets you send and receive different local currencies, as well as exchange in and out of those local currencies. But you're not exposed to a bunch of order books, you're not exposed to a ton of public and private keys- a lot of the stuff is abstracted away so that your overall experience using the application is + +[14:00] super easy to understand. So, again, we want to expose users to as little as possible about the network: hone in on one thing that you want your application to do really well and just focus on that. It's not really necessary to support all of the functionality on Stellar, because it can be a lot to handle as a developer, and it's also a lot to handle as a user. The next thing we can do is educate users. You want to let users know what they're getting up front. One of the things that I find a lot of services guilty of, especially in this space, is assuming that people will understand the value proposition right away. That's oftentimes not the case, and so you want to make sure that you can present that value proposition to them. Some real-world examples of this are, say, Keybase's homepage. Keybase is an extremely powerful chat application, but it can be difficult to use if you don't know what those value propositions are, + +[15:00] and so you have their home page, which kind of goes through all of the functionality that's available to you and why you might want to use that stuff. And then another example, on the right, is something like Robinhood. What's interesting about this example is that if you're downloading Robinhood, you probably know what you're getting before you download it- I knew what I was going to get before I created an account- but they still take the time to educate me anyway. For example, they do these four slides where they talk about how you can invest in stocks, it's secure, you have free stock trading, etc. They let you know what you're getting before you even click sign up or log in. So recreating this flow within your Stellar applications is just as important- not everybody understands Stellar's value proposition of interoperability, so making sure they understand it up front is always good to do. An example of this in the Stellar ecosystem- and I + +[16:00] think one of the better examples- is Lobstr. Before you create a Lobstr wallet, they actually present you with four screens, very similar to Robinhood, where they tell you exactly what you're going to get up front: trading platform, I can manage my assets- sick, that's exactly what I want to do. So when I create my account, I know exactly the functionality that I'm looking for and exactly how I'm going to use the app before I even get started. Additionally, they've spent some extra time translating their app into other languages. So if you're creating an application that's global in nature- which is absolutely possible on Stellar- you want to make sure that your service can actually onboard those users: make sure it's translated for the target markets you're going after, and make sure that everybody you say should be able to interact with your app actually can.
Fortunately, our community is global in nature, and so it makes the process of finding people to do these translations really + +[17:00] easy. You can pay a handful of community members to knock out translations, and next thing you know, you can serve markets all around the world and easily onboard those users without any confusion. So, to wrap this thing up and keep it kind of short and sweet, the three things we went over: first, following and creating standards. This is really important. If you're a developer creating a use case and there's some standard that you think could be derived from it, participate in the conversation and do your best to help come up with standards for doing things- and then also do your best to follow those standards. They're there for a reason: they'll make your life easier, not only as a developer, but as somebody trying to launch a product that's interoperable with other products within the Stellar ecosystem. You can find these in the Stellar GitHub repo- they're all there, there's about 30-ish of them or something like that. So if you're building an app on Stellar, make sure + +[18:00] you're going through those, understanding them, understanding which ones make sense for your use case, and all that good stuff. Additionally, we want to simplify the experience, like I mentioned. Simplify interfaces: remove the stuff that's not necessary, abstract away the stuff that's really hard, and then hone in on a use case. Find out what you're actually trying to accomplish with your application. Is it a payments application? A trading application? Maybe a little bit of both? What features do you need to present to your users, and what features do you need to remove? And then, lastly, educate users: show them where the value is, show them what they're getting up front, and make their lives easier whenever they're using your applications. You don't want to just dump an application with a ton of functionality on them that they'll never understand how to use. And that's it- that's pretty much all I had to touch on throughout this whole presentation. I just wanted to go over some basics of + +[19:00] what to consider when you're getting started on Stellar, and how you can scour the ecosystem to find resources that can make your applications usable. I don't think there were any questions- if there are any, feel free to ask them really quickly and I can try to answer them. Other than that, if you have any further questions, you can get ahold of me on Keybase- my username is just kolten. Additionally, you can find me on Medium, where I write a weekly newsletter called the Dev Digest, in which I basically cover all of this kind of stuff. If you're interested in the latest conversations around ecosystem standards, I cover that; if you're curious about the latest developments in the ecosystem, I cover all that stuff too. So if you're interested, as a developer, in keeping up with what's going on within the Stellar ecosystem, you can easily do that just by checking out my Medium. And there's also some basic stuff there, such as introductions to trustlines, introductions to path payments, all that good stuff. + +[20:00] So if there are no questions, I guess we can call it a little early. Thanks, everybody, for tuning in, and remember to tune in again.
I think next week we're doing a SEP roundtable, and then the week after that we'll be doing another engineering talk. So thanks, everybody, for coming by. + +
diff --git a/meetings/2020-04-30.mdx b/meetings/2020-04-30.mdx new file mode 100644 index 0000000000..c664146ae8 --- /dev/null +++ b/meetings/2020-04-30.mdx @@ -0,0 +1,171 @@ +--- +title: "Continued discussion: Creating a Stellar Ecosystem Standard for Send / Receive Transactions" +description: "This overview highlights anchor services." +authors: + - andy-wermke + - gbubemi-agbeyegbe + - lisa-nestor + - michael-feldstein +tags: + - community + - SEP-10 + - SEP-6 + - SEP-3 + - SEP-9 + - SEP-24 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Lisa Nestor reconvened the Stellar ecosystem for a continued public roundtable on creating a “direct send” (or send/receive) SEP, building on discussions from two weeks earlier. She framed the session around a key gap in today’s standards: while SEP-6 and SEP-24 handle deposit and withdrawal flows well, many real-world remittance and B2B payments never touch a wallet at all. In these cases, users simply want funds to move from one bank account or cash point to another, and Stellar lacked a clear, standardized way to support that fiat-to-fiat flow between anchors. + +The conversation focused on two major extensions needed to make this model viable at scale. First was multilateral KYC, where anchors could securely share compliance results instead of duplicating checks in every jurisdiction. Second was enabling noncustodial wallets to initiate direct send transactions without becoming anchors themselves. Representatives from Tempo, Cowrie, SatoshiPay, Inclusive, and SDF explored how these pieces could fit together while preserving regulatory responsibility, user consent, and anchor control. + +### Key Topics + +- Purpose of the direct send SEP as a complement to SEP-6 and SEP-24, targeting anchor-to-anchor fiat payments where recipients never interact with digital assets. +- How SEPs evolve as open, community-driven standards to formalize new Stellar use cases as they emerge. +- Inclusive’s multilateral KYC (“EDD Connect”) concept for sharing KYC and KYB results across anchors to reduce cost, friction, and regional specialization. +- Regulatory considerations across strict jurisdictions such as the EU, Japan, and Switzerland, and the need for authorized compliance providers. +- Handling edge cases where receiving anchors require additional data beyond what originating anchors collected. +- Consent, privacy, and trust models for sharing KYC data, including which parties can access data and under what conditions. +- Enabling noncustodial wallets to initiate direct sends through SEP-10 authentication, key whitelisting, and anchor-controlled endpoints. +- Separation of concerns between KYC processes and payment execution to allow pre-registration and smoother user experiences. +- Use of memos and references so wallet-initiated payments can carry user-specified identifiers through bank transfers. + +### Resources + +- [GitHub discussion thread (pre-read)](https://github.com/stellar/stellar-protocol/pull/592) + +
+ Video Transcript + +[00:00] My name is Lisa Nestor and I work on our ecosystem strategy team at the Stellar Development Foundation, and I will be providing moderation support during our discussions today. So the topic on the table for this virtual roundtable is the direct send proposal. This is a discussion continued from exactly two weeks ago, on April 16th, and we will be talking through and finalizing the direct send proposal. First, however, I'd love to give just a little bit of background on what a SEP is. This is a Stellar-native term, and SEP stands for Stellar Ecosystem Proposal. SEPs are publicly created, open source documents that live in a GitHub repository, and they facilitate the + +[01:00] creation and eventual adoption of technical standards within the Stellar ecosystem. So SEPs are really a dynamic way of introducing standards and protocols utilized in the ecosystem built on top of the Stellar network, and, in particular, as new use cases evolve throughout the Stellar ecosystem, new SEPs help to formalize those use cases and make them increasingly easy to implement and streamline between various participants. Specifically, today we're going to be talking about the direct send SEP- people are also referring to this as a send/receive SEP- and the purpose of this SEP is really to enable fiat-to-fiat payments facilitated between two anchors. We also think of this as a SEP to support more traditional-style cross-border payments, in which a sender executes a transaction to a receiver in + +[02:00] a foreign destination and really wants that receiver to just have funds arrive in their bank account- they don't want to interact with a wallet or a digital asset themselves. So, like a remittance or a business payment: the transaction is executed and the receiver can just receive the funds. To date, there have been some initial decisions made around this SEP, and we've linked to the specific GitHub pull request there. So, to move forward, let me quickly introduce our participants. Today we have four different companies from the Stellar ecosystem, with representatives from each joining us: we have Anthony from Tempo, Andy from SatoshiPay, Gbubemi from Cowrie, Claudio from Inclusive, and Michael from SDF. So thank you all so much for + +[03:00] joining us and being patient this morning. And, finally, this is what today's agenda will be. We are going to work hard to talk through each of these line items, so if you're in the stream, feel free to follow along with us- and with that, I will hand it over to Michael from SDF to help kick things off. Thanks, Lisa. Okay- really quickly, can we go back to the agenda? Yes. So last week we talked a lot about the happy path for this proposal. I think we got it to a really good place- we had some updates from the conversation, and I think we're pretty happy with where it is for the happy path, where it's simply two anchors that have good financial partnerships or are financial institutions themselves. And what we're going to talk about today are some additions that might become a part of the SEP, or might become + +[04:00] a part of an additional SEP, that make it possible for a lot more parties to participate in this network of fiat-to-fiat rails.
So the first thing we're talking about is multilateral KYC, and Inclusive is going to give us an update about what that is. I think the general idea here is that it provides a much easier way for multiple parties to share KYC data, so everybody doesn't have to duplicate efforts, and people who might not have been able to get involved previously can through this process. And then we're going to talk about how noncustodial applications- so, noncustodial wallets- can start to execute these direct send transactions. So I'm going to pass it over to Claudio- I apologize if I pronounced that incorrectly. No, that's fine, thank you- you're closer than most get. + +[05:00] So, what we are doing at Inclusive, what we are trying to achieve with multilateral KYC, and, let's say, our role in the Stellar network: maybe you want to see us as focusing on compliance checks- KYC, KYB, and CDD, continuous due diligence- plus monitoring and reporting. As a little bit of background on multilateral KYC- and maybe this was already touched on in the last discussion- each anchor, for each payment, has to do KYC on the sender; that is, if we are talking about the bank-to-anchor SEP which is on the agenda. And the receiving anchor, from the regulatory point of view, also + +[06:00] needs to do KYC on the sender, and of course on the receiver. So that's multilateral KYC- or, as we are calling it, EDD Connect. Basically, through our product we will enable anchors to share this information- and by "this information" I'm referring to the KYC data without any personal information attached to it, right? So, as an example: let's say one anchor from Europe wants to do a payment to an anchor from Nigeria, and the anchor from Europe already onboarded, let's say, the sender using our APIs, and the anchor from Nigeria is also using our APIs. So when the anchor from Nigeria does the sender KYC, basically we are not + +[07:00] performing another KYC check; we are just sharing the existing data- let's say, the report and history of the sender- which was already produced for the anchor from Europe. So basically, this is KYC Connect, EDD Connect, and this is what it brings to the table: it brings, I think, cost reductions, because it reduces costs; it also makes the process more efficient, because just by enabling data sharing, the process is almost instant- it doesn't involve a fresh KYC check and it doesn't involve much manual interaction. But of course it can be extended with checks that involve some, let's say, offline processing, in case + +[08:00] you want to do extra due diligence on a certain entity or individual. Also, this multilateral KYC is not, let's say, forcing an anchor to do KYC in a single way- basically, we just try to provide the framework, but each anchor can have its own particularities: for example, extending KYC with its own services or whatever third-party tools they are using. What else? So, we've already started discussions.
I think our CEO is in contact with most of the anchors who are present right now on the call, to agree upon, let's say, requirements and use cases which all of us are trying to fulfill on the Stellar network. So yeah, we are more than welcoming if anybody else wants to join- this is a + +[09:00] huge initiative. Okay, this is exciting for two reasons, I think. One is what you said- you mentioned it as cost savings, and I think it's more than that; I think it's enabling. You know, the cost savings come from the fact that right now every anchor kind of has to be a specialist in the KYC of every region, which is obviously a huge cost- and also it's just not going to happen. So this kind of says: this one company is going to focus on KYC in different regions, and now everybody else doesn't have to. So that's a cost savings, and it acknowledges that no company is going to be able to have a team large enough to really work all + +[10:00] over the world. And the second thing that it does- and correct me if I'm wrong here; this kind of goes to the last point in that subsection- is that it enables people who may not be able to do actual KYC- people who are not financial institutions, such as SatoshiPay. This will allow them to offload the KYC process onto Inclusive. So when they start a process, they can get KYC done through a regulated institution like Inclusive, and all the other parties in this multilateral agreement can accept it, and that will, regulatorily, allow a SatoshiPay to start creating these transactions. You know, there are two things blocking them: one is the technical one- does it fit into the spec- and the second one is the regulatory one- will other parties accept payments that originated from a noncustodial wallet? So am I correct in + +[11:00] saying that this will solve those two problems? Yes, you're right- we provide everything you said, and that's our role. Basically, that's our mission: to provide easy access to compliance services and also, you know, to banking integrations. And then, yes, also from our side, that sounds like what we're aiming for. Cool. To hear from the anchors: does that make sense from your perspective, given how you typically handle those responsibilities yourselves? For us, it comes down to how + +[12:00] much access we have to, say, the KYC processor- as long as we have, you know, access to the data and so on and so forth, we could probably still meet the regulatory requirements, saying that yes, this KYC was done, even though we didn't collect the data ourselves. And when you do your own processing, like you just said, it's only in your own home country, right? So you still don't need to specialize in KYC for other countries- just your own. Yes. But to meet the regulatory requirement in our jurisdiction- each of us is country-specific- in this multilateral agreement, is it something that's allowed where a + +[13:00] receiving anchor can pull the data down, or is it obfuscated behind a token? To have the actual data, to meet the regulatory requirement- so it can't be obfuscated: we have to have access to the data, and in certain situations we're required to share that data directly with the authorities.
For example, if there's an investigation or something like that, they would need to see the actual data. So right- by "not obfuscated" you mean actually being able to access it. In the current situation, where we process it ourselves, we would actually process and store the data, so it wouldn't be obfuscated- that satisfies my regulator. So, Claudio, is that something that's possible + +[14:00] with your scheme? Yeah. So basically, just to make things a little bit clearer: let's say you have the data and you want to do the KYC check on it. What you will do is just call us, and we will see if that person was already KYC'd and whether we have a report on them- and, of course, only if the institutions, like the anchors participating in this, are willing to share the information about them. Because each country might have its own regulations- in a certain country, maybe you are not allowed to share the KYC information, even if it's not personal information. So basically, each anchor will own the personal information- yes, we will also have it- but you will get back the KYC data, and nothing will be obfuscated + +[15:00] in it. Right, okay- so any data that's shared by one anchor who's part of this multilateral agreement can then be pulled down by a receiving anchor who is also part of the agreement. Exactly- it's a full data sharing agreement. It's not like we're just saying "we trust that Inclusive did it, so we're cool with everything"- we can actually pull whatever we need down. Yeah, exactly. Cool, okay- it sounds like it solves the problem from Cowrie's end. I don't know about Anthony from Tempo- does that sound right to you? Does this sound like it'll solve any problems from your end? It could. There's EU regulation going into place where they'd have to be authorized, by the EU central bank, as a KYC provider. So that's something where Inclusive + +[16:00] would have to get this authorization from the EU. Yep- and right now, I don't think they've actually approved anybody; there's a bunch of companies in the application process, but that's the current state in the EU. And then we're also doing it in Japan, and in Japan it's a little bit more lenient- they don't require this sort of artificial-intelligence check on the passport or ID. And we typically consider the EU to be one of the more strict jurisdictions- is that correct? That's my understanding. Specifically, Japan, Germany, and Switzerland are the strictest, and then France comes right after, I would say. Okay, cool. + +[17:00] And then, I mean, the nice thing about that is that this is Inclusive's bread and butter: getting authorized in all these places so that everybody else doesn't have to. So okay, it sounds to me like that scheme does solve the regulatory issues, with SatoshiPay and other noncustodial wallets being able to, you know, get KYC data into the system and be trusted from that end. Lisa, can you share a link to that agenda so I can pull it up? I think the next thing we talk about is the technical aspects of it. So I guess we can look at some more of these questions.
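The "call us and we check" flow Claudio describes above is easy to picture. A purely illustrative sketch follows- Inclusive's actual API was not specified in the call, so the endpoint, fields, and auth below are invented:

```typescript
// Illustrative only: a receiving anchor asks the shared compliance provider
// whether a sender was already KYC'd by another participating anchor, and
// reuses the existing report instead of duplicating the whole check.
async function lookupSharedKyc(
  providerUrl: string,
  apiKey: string,
  senderId: string,
) {
  const res = await fetch(`${providerUrl}/kyc/checks/${senderId}`, {
    headers: { Authorization: `Bearer ${apiKey}` },
  });
  if (res.status === 404) {
    return null; // no prior KYC on record, so run a fresh check
  }
  // A prior report exists and the owning anchor agreed to share it.
  return res.json(); // e.g. { status: "passed", checked_at: "...", fields: {...} }
}
```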
So first we were asking whether the strategy works for everybody. It sounds like, assuming that + +[18:00] Inclusive gets authorization from the EU to provide this kind of data, this will actually solve a lot of these problems. We asked about whether there are any geographies or scenarios where this may break down, and we talked about how, you know, in Europe things are stricter, but it doesn't break down as long as the providing company can get authorized in that geography. And then, if we're looking at edge cases where this may not work, I think one of them is the case where the receiving anchor actually needs some more data. So, you know, I'm imagining the situation where a sending client comes in from a very lenient state and provides some data that might satisfy their originating country's regulations, but + +[19:00] once you're trying to send to somewhere in the EU, it might be that they need more information- there, they're much stricter. And so I'm curious how Inclusive would imagine this working: do we ask the receiving anchor to collect more data, or does Inclusive say, okay, now that you're sending to the EU, we're going to have to collect more data? Do we have an idea how that would work? Yes, so we had a discussion internally about this and, to be honest, it depends on the customer, because each customer has its own needs. But how we want to address it is: each customer- let's say, a compliance customer of ours- can define its own list of extra fields they want to collect, besides our core fields, which we provide by default for doing KYC and KYB on individuals and entities. And this + +[20:00] is how we're thinking of building it. And, of course, if a field makes sense for us to include by default in the core fields, we can do that- and I can give an example, as a result of the discussion we had with Cowrie. They requested a custom field called the BVN, a Bank Verification Number, which is specific to Nigeria, and to validate this field we have to integrate a specific API from Nigeria. We then had an internal discussion, and what we decided is this: look, this is a core field for us, because not only Cowrie- all the entities or individuals from Nigeria need this kind of validation if you want to offer them the compliance checks they need in that specific country and region. So that's one thing- it depends on the case. If something can be made a core field, we will include it as a core field, and it's enabled for all the countries. And if + +[21:00] it's something specific- let's say not to a country or region, but to a company or entity or individual- it can be addressed through configuration. So we thought about this, and at least these are the two directions we want to go, if that makes sense. So when the KYC is originally being provided- when an originating anchor initially collects the data- are they collecting that data with the knowledge of where it's going to go, so that they can tell you, hey, we want to send this to the EU, what do we need to collect? Or do they just collect the data for their own region, send it to you, and then wait around in the process?
The receiving anchor would then ask you for the data, later in the process? Yeah, okay. So how I see this process is tied to the SEPs which are + +[22:00] available in the Stellar protocol repo: each anchor will define the fields it needs for KYC. So, basically, when this information is exchanged between anchors- you know, for example, an anchor which is using Inclusive as its compliance provider may interact with an anchor which doesn't use Inclusive's compliance- and, correct me if I'm wrong, this is embedded in the SEPs, right, the fields you can share between anchors. So each anchor should be responsible for configuring those fields, even a custom field. For example- and I think it's SEP-9 which has the list of the fields- a custom field can still be requested through this exchange of fields, and, of course, if it's broadly useful, it can also be added to that list. Right- okay, so it sounds like it's the responsibility of the originating anchor to make sure that they're giving you enough data. Exactly- and of course, some of the fields + +[23:00] can be made optional and some of the fields can be made required, so you have that flexibility in how you map the fields. Cool- does anyone else see edge cases where that scheme might fall apart? Because it sounds okay. Well, generally what we do- because the first time, you might not necessarily know all the jurisdictions a customer intends to send to going forward- but what we tend to do is, as we onboard a client, we ask which countries or which regions they intend sending to, now and in the future, kind of thing. And based on that- for example, if they have payments that are going to Europe, or include Europe as one of their destinations- then + +[24:00] we would try to acquire the KYC information required for Europe; if it's the US, we acquire what the US requires. So we take on the responsibility, as the anchor engaged in the payments, to get the correct information for the correct destination. And for business-to-business payments there's a very involved onboarding process, so we can ask the client up front: okay, are you going to remit to the US, or is it going to be Europe? And that means we have a better idea of what KYC information would be needed for the destination. Okay, yeah, I think that all makes sense. There's also the case where, for + +[25:00] example, on a very large transaction, the receiving anchor might need extra data, and I think this is fine. The way that it would work is, in the SEP right now, the sending anchor checks the status of the transaction periodically, and it might find out that there is actually more data needed. So in this case, would they provide it directly to the anchor, or would they provide it to Inclusive and tell the anchor they've updated their KYC profile with the extra data that was needed? I actually don't think it matters- I think both ways would work just fine.
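As a rough illustration of the field model discussed above: SEP-9 defines standard KYC field names that anchors can request from each other, and destination-specific extras can ride alongside them. The custom-field convention and the per-destination map below are assumptions for illustration, not something SEP-9 specifies:

```typescript
// Standard SEP-9 fields plus a destination-specific extra. SEP-9 defines
// names like first_name and bank_account_number; the "bvn" entry and the
// overall payload shape are illustrative.
const senderKyc = {
  first_name: "Ada",
  last_name: "Obi",
  email_address: "ada@example.com",
  bank_number: "058", // routing/bank code (SEP-9)
  bank_account_number: "0123456789", // (SEP-9)
  bvn: "22212345678", // Nigeria's Bank Verification Number, a custom field
};

// A receiving anchor could advertise which fields it needs per destination,
// e.g. require "bvn" only for payouts into Nigeria (illustrative shape).
const requiredFields: Record<string, string[]> = {
  NG: ["first_name", "last_name", "bank_account_number", "bvn"],
  DE: ["first_name", "last_name", "bank_account_number"],
};
```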
I think, from my perspective, and at least how we are positioned right now, it's the anchor's responsibility- because, basically, Inclusive, + +[26:00] with its compliance service, sits underneath the anchor, and the compliance engine is not exposed beyond the anchor, right? All that's exposed to the anchor is just the API definitions, which are guided by the SEPs- in no SEP right now is there any mention of a compliance engine. So basically, the anchor is responsible for integrating with Inclusive through a SEP- a SEP which collects the KYC information. At least, that's how I'm seeing things right now; maybe in the future that can change. Is everybody else still here? I'm still here. Yeah, I think we just got a little delay there. Okay, I do have one + +[27:00] or two comments or questions, kind of on topic. One of them is: you know, Michael, we historically had a compliance service, or compliance server, as part of the Stellar ecosystem stack. Did you all consider using that, and why or why not? Yeah- SEP-3, I'm just going to pull it up real quick- there were aspects of it that we used, for sure, and there were aspects of it that didn't quite work. This is a good question, and I think I should write some information down about it. The answer is: I would need to go back and collect it- I don't have it offhand. I do know that, + +[28:00] if you're talking about SEP-3, we referenced it in a lot of ways, but there were a lot of hiccups with it that made things more difficult. It's Anthony- I can add to that, because we were using the compliance server quite a bit. This new implementation is a lot simpler and cleaner; the compliance server was a little bit complicated to implement, and it's a little bit different from the other SEPs, like SEP-24. So this keeps everything with one authentication mechanism, SEP-10, and one kind of signing of the data. Gotcha- that makes sense, great. And so one other comment I'll bring in before we jump back into things: we're talking a lot about sharing + +[29:00] KYC data between anchors. Can anyone speak to how we ensure that this data is treated securely as part of this process? I mean, you know, how does a consumer using an anchor know that any personal information they share is going to be treated securely throughout this kind of anchor KYC exchange process?
So people who are part of this network have all agreed on a public key with each other, so that they can authenticate with each other. I think it's safe to say that, for all reasonable intents and purposes, this data can only be accessed by the parties who are involved in this agreement. Now the interesting part of the question is: how do we know who's part of this agreement? How do we know who can actually access our data? And I think the answer to that is: + +[31:00] anyone who's part of this multilateral agreement knows who else is part of this agreement. So, as a user, you're giving your data to a company that you trust. If you're giving your data to a company you don't trust, all bets are off. So you've kind of entered into this by saying, I trust Cowrie to deal with my data, and implicitly you're saying, I also trust the people that Cowrie shares this data with. But that's a much bigger leap. Does anyone else have any thoughts on the trust story here involved with personal data? Yes. So, from my point of view, or our point of view, I think it would make a lot of sense if the user would sign something- like a piece of data that says, I want to, or I agree to, share my data with + +[32:00] anchor X- so anchor X, or the KYC provider, can later prove that the user was informed that the data is going to be shared, because the user signed that piece of data. So it's like a contract between two parties. Yeah, I think it's a much clearer story when a user comes to Cowrie, puts in their data- clearly they trust Cowrie with the data- and then they actually go to send the money, and it says, we're gonna go through Tempo. I don't know if the UI actually tells them who they're going through, but pretty much: I want to put my data in for this transfer between these two companies, so I trust that these two companies will treat my data properly. The question that comes up is: since we have this multilateral agreement, can a third company + +[33:00] come in and just request, you know, Michael Feldstein's data from Inclusive, if a kind of sketchy company gets in? So I guess there are two questions here that I have for the Inclusive product. First: does every party that's already involved in the multilateral agreement have to agree to a new party entering? And second: can any party, at any time, just request data that's been entered into this pool, or is it only possible when a user agrees that that's the other party they want involved with their data? Mm hmm. So, sorry for dropping off, I had some internet issues. So: it depends, to answer your question, because I think it depends on the regulations. For example, under U.S. regulation, by + +[34:00] law you can share KYC data- not personal data, but you can share KYC data, right. So let's say, for example, Europe is the same, and by law you are allowed to share KYC data but not personal information data. Then, when an anchor from the US is doing payments and transactions with an anchor from Europe, for example, there is no need for an agreement, because we are not sharing personal information. You just distinguish between KYC data and personal information.
Yes, KYC data. For example: I've checked you against a sanctions list and you're not there- but I'm not sharing the name, just the fact that it was checked, right. I checked you on the OFAC list, or the UN list, or something else, and you are not there, right; or whatever the sanctions check is, or an address match, or things like that. So the KYC data doesn't contain any personal information about the person; what you share is actually + +[35:00] the result of processing that person's information. Exactly. So the anchor has to keep the personal information; what is sent is either just an identification, or a hash, or something derived from that personal information- we are still working on this. And if we already have the KYC for that unique entity or person, we just share the KYC data, which doesn't contain any personal information. And again, that's why I said it depends, because it depends on the regulation: maybe in some countries or regions you're not allowed to share KYC data even though it doesn't contain any personal information, right? So, for the case of sharing personal information, like we talked about earlier: this process does allow a receiving anchor to pull down the personal information- as was said, they need to be able to share that with regulators, they need to actually get the data. So, for the process of + +[36:00] an anchor actually pulling down personal data, how is that controlled? I think that's controlled at the anchor level and the SEP level. I think you guys already had this in the first version- I think it was SEP-3 or something, anchor to anchor- in which, when you were requesting personal information for a certain entity or individual involved in the transaction from another anchor, that anchor would have a check on this information, like whether it's shareable or not, something like that, right. So I think there should be- and I think somebody mentioned it should be- a contract signed between anchors to be able to do this. And at the entity or individual level, maybe when you start a transaction, you can ask: okay, are you willing to share your personal information? And maybe a note that, if you're not willing to share your personal information, it + +[37:00] means that maybe the transaction won't go through, or something, right. But is there any way for someone to say, I'm willing to share my personal information with just these two entities, versus, I'm willing to share my personal information with anyone involved in the multilateral agreement? Yes, I think it's possible, but, to be honest, I don't know whose responsibility it should be: the anchor's responsibility to collect this data, store it, and do the check- okay, I'm allowed to share the information with this certain entity or individual, or with this provider, let's say, or this anchor- or whether it should be the compliance provider's responsibility to collect this data and do that little check of whether I'm allowed to share it, right. Again, we are not trying to share personal information, we are trying to share KYC data; that's the main purpose. Okay, it seems like an aspect that we really need to dig into, and + +[38:00] that was a really good question. I'm glad somebody brought it up.
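The distinction drawn here- sharing the result of KYC processing rather than the personal data itself- can be sketched as two payload shapes. All field names below are illustrative, not from any SEP:

```ts
// Illustrative only: the field names below are hypothetical.
// Personal information stays with the anchor that collected it.
interface PersonalData {
  firstName: string;
  lastName: string;
  birthDate: string; // ISO-8601 date
  address: string;
}

// What can (jurisdiction permitting) be shared between anchors is the
// result of processing that data, keyed by an opaque reference, with
// no personal fields included.
interface KycResult {
  customerRef: string; // opaque ID or hash, never a name
  sanctionsListsChecked: string[]; // e.g. ["OFAC", "UN"]
  sanctionsMatch: boolean;
  addressVerified: boolean;
  checkedAt: string; // ISO-8601 timestamp
}
```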
I'm not certain that we have an exact answer for this, but it seems like, if we're gonna have this way for anchors to download personal data, we're gonna need some pretty good controls on who can actually do that. Otherwise it's a broad pool you're putting your data out into. So I guess the more straightforward question is the first one: in this multilateral agreement, how does that pool grow? Does everybody in the pool have to agree to every new addition? And just one thing, Michael: I do want to call attention to the time. So, as much as we can, let's eventually move the discussion to the next section; I think that would + +[39:00] be great to do, but I don't want to interrupt you. I think that's something we need to dig into a bit more after the call. So, yeah, let's take a look at that next section. Okay, great. So, Andy, did you want to give a bit of an intro on this? Yes, of course. So we have a bit of an unusual use case from an anchor's perspective, considering what we've discussed so far, because we are a non-custodial application and we want to take part in that direct send process. Right now there are two approaches that we are trying, or discussing internally at SatoshiPay, and for those two approaches there are two different flows and two requirements, one each: how would we take + +[40:00] part in, and how would we facilitate, the direct send, even though we are not an anchor. Our use case is that we have a client who is in charge of their own funds- they own their secret key- and we want to use that direct send API between anchors, or to another anchor. One option we are looking into right now is that, basically, there is a sending anchor, and we would need some way to initiate the whole payment flow: we, as a third party, need a way to tell anchor A, the sending anchor, to actually start a payment to anchor B. That is one option. The other one + +[41:00] is, from a receiving anchor's perspective, that we would basically be the sending anchor in some sense, but we would actually send the money from the user's account, from the client's account- or rather, the client sends it, as we're non-custodial. And for that we would need to be able... the direct send proposal states that the endpoint is not supposed to be public; there's a whitelist of entities that are able to use that endpoint, usually anchors. We would not only need to have ourselves whitelisted as SatoshiPay, but we would need to be able to have something like sub-accounts: we have a SatoshiPay identity, and we have our clients, and + +[42:00] the keys of our clients would need to be whitelisted to participate in that direct send. I hope that makes it halfway clear, even though it's a lot to paint in the air. So maybe we can get some thoughts from you, Anthony, about how you imagine the ability to actually take this on as the receiving end. Yes, it would be key-based; I think that would be the most straightforward implementation. For every key that is the initiator of a transaction, which would be a SatoshiPay customer, we would need to know that key.
+ +[43:00] I believe the SEP has you publish an authentication key, right, so we can verify that an incoming transaction was signed by your registered key. So basically, we just need to have a process where SatoshiPay would register each individual key with us, maybe as part of their onboarding process or something. They would say, okay, this key belongs to SatoshiPay, or to one of SatoshiPay's customers, and then we would essentially whitelist that particular key. And when the send/receive API is being called, it would need to be authenticated with SEP-10, so we know that the transaction was signed by this key, which was whitelisted to start with. So, yeah, I think that would be the most straightforward way. Yes, that sounds + +[44:00] exactly like what we had in mind. And the point- like, what's missing right now from our perspective- is just that we need some protocol for how we do this registration part. Yes, essentially we need to come up with a way to actually do that; we could go as far as saying that it's the same process... So, first off, is it okay if we go ten minutes over, since we kind of got a ten-minute late start? I'm good with that. Yep, I was just actually messaging people about that, so in fact we can go ten or + +[45:00] fifteen minutes over, with the hard cutoff time being 9:45 a.m. Cool. So, in that regard, one of the other things that we're working on, tangentially to this, is the ability to kind of pre-register KYC. Everything we've talked about so far is person-to-person; one of the other things that came up was business-to-business payments, and this gets a lot more complicated, because business KYB- know your business- stuff is a lot more complicated and less standardized than know your customer. So this is kind of a manual process, and we're talking to Tempo, who does this manually, so it's important to get this done ahead of time, where we can say: if you're gonna want to do business, let's register you ahead of time. And so we actually have- not a SEP, but kind of a proposal that + +[46:00] we did- where it's essentially an augmentation of SEP-12. It is a way to upload customer data to somebody, but it's a protocol that says: I want to register an entity, here's their personal data; tell me what personal data you need from them, I'm gonna give it to you, and we can see if you're happy with them yet; if you need more, I'll give you more data. This might be useful. I think, Andy, you were on that thread, right? Have you seen what I'm talking about? Yes, I commented something earlier today, actually; I don't know if you saw it. Oh yeah, sure. Yeah, so I wrote that, the more I think about it, the more it occurs to me that, from a protocol level, it might even be cleaner to separate the actual transfer + +[47:00] SEPs from a KYC SEP, so that the KYC can be done ahead of time. And it could still be done in the moment you do the transfer, because you either haven't KYC'd yet, or the data that you supplied is not enough, and in that case, instead of just telling you to provide more data, like we do now, it would basically be the same, but redirect you to the KYC endpoint.
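The registration-plus-whitelist flow described just above is compact enough to sketch. Assuming a SEP-10 handshake that yields a JWT whose subject is the authenticated Stellar account (the registry contents and endpoint wiring below are hypothetical):

```ts
// Sketch of the whitelisting gate on a receiving anchor's direct-send
// endpoint. A SEP-10 handshake yields a JWT whose `sub` claim is the
// Stellar account that proved control of its key; the keys below are
// placeholders registered during onboarding.
const whitelistedKeys = new Set<string>([
  "GBEXAMPLEWALLETKEY1...", // the wallet itself
  "GBEXAMPLECUSTOMERKEY2...", // one of its non-custodial customers
]);

function authorizeDirectSend(sep10Subject: string): void {
  if (!whitelistedKeys.has(sep10Subject)) {
    throw new Error(
      `account ${sep10Subject} is not whitelisted for direct send`,
    );
  }
  // Proceed: the caller proved control of a key we registered earlier.
}
```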
So the KYC endpoint would become the single source of truth for all things KYC. But of course it's a tricky topic, because it would require us to change the existing SEPs, like SEP-6 and SEP-24 also, so this is a bit of a hot topic, I + +[48:00] guess. Yeah, definitely not eager to make changes to SEP-6 and SEP-24. But for the process of this: if there was a protocol where somebody could register a user linked to a Stellar key- or rather, a Stellar key and an optional memo, for custodial solutions- it seems like it might allow you to do this upfront registering. And then, once Cowrie or Tempo has accepted a client based on this registration process, SatoshiPay's wallet can initiate the transfer using that key-and-memo combo- I guess in your case it'll just be a key, for SEP-10. Like you said: separation of concerns, SEP-wise. And, this is more the engineer talking, from a SatoshiPay + +[49:00] perspective, everything that enables us to also do the KYC ahead of time would do the job. Okay, let me share that proposal with you; I don't think I actually sent it to you originally, but I think this can allow for exactly what you're talking about. Yeah, as long as you're willing to allow a SatoshiPay wallet, or a non-custodial wallet, to register users in this way, I think we can do that. Technically it doesn't seem that complicated, what you're asking. Worst case, if it's really an issue to have a non-custodial client supply the actual KYC data, their personal information, maybe we can have a fallback to an interactive KYC- but of course it + +[50:00] would allow for a much smoother user experience if we could supply the actual data. Yeah, for this direct send, send/receive stuff, we've really been focused on non-interactive, because this is a communication between two businesses, normally. Yeah, I see what you're saying: let's try to do the non-interactive way, and if things really fall apart, then we can start to look at the interactive way- taking a KYC form from the receiving anchor and showing it in the wallet, similar to how SEP-24 does it. I don't actually think we're going to need to get there; we're really just gonna + +[51:00] stay with the fields that I need. Yeah, that sounds good to my mind. Okay, I feel good about that. Wonderful. So do we feel like we have both a short-term way for how we want to start testing and implementing this stuff, and a medium-term plan for building out something more sustainable, long-term? I think they're very similar approaches. Okay. It's Anthony: Michael, you asked me about SEP-6, I think, last week. Mm hmm. Are you thinking of bringing that back, or adding it to this new standard? The question I + +[52:00] had last week was more kind of a maintenance question. It seems like everyone in the ecosystem kind of likes to continue to use SEP-6, so the thing I was asking about was just for security purposes: currently there's a lot of personal information that's sent over a GET request, in the URL, and URLs are not really secure- they can be leaked through browser history or whatever. So we just wanted to change that endpoint to be a POST request, which it sounds like you already did. So it was totally separate; it had nothing to do with this. Okay, thank you.
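The ahead-of-time registration idea maps naturally onto SEP-12, which keys customer data to a Stellar account plus an optional memo. A sketch of what that pre-registration call might look like, assuming a SEP-12-style endpoint (the server URL is hypothetical, and the response handling is simplified relative to the actual spec):

```ts
// Sketch of ahead-of-time registration using a SEP-12-style API:
// PUT /customer with a Stellar account (plus optional memo for
// custodial sub-accounts) and SEP-9 fields, then GET /customer for
// status. URL and values below are placeholders.
const KYC_SERVER = "https://anchor.example.com/kyc";

async function preRegisterCustomer(sep10Jwt: string): Promise<void> {
  const form = new FormData();
  form.append("account", "GBEXAMPLECLIENTKEY..."); // the client's key
  form.append("memo", "12345"); // optional, identifies a sub-account
  form.append("memo_type", "id");
  form.append("first_name", "Jane"); // SEP-9 fields the receiver wants
  form.append("last_name", "Doe");

  const put = await fetch(`${KYC_SERVER}/customer`, {
    method: "PUT",
    headers: { Authorization: `Bearer ${sep10Jwt}` },
    body: form,
  });
  const { id } = await put.json();

  // Later (or immediately), check whether the receiver is satisfied.
  const get = await fetch(`${KYC_SERVER}/customer?id=${id}`, {
    headers: { Authorization: `Bearer ${sep10Jwt}` },
  });
  const { status, fields } = await get.json();
  console.log(status, fields); // e.g. "NEEDS_INFO" plus missing fields
}
```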
Great. Well, were there any last items that people wanted to discuss before we end the roundtable for today? Yeah, I have one more thing + +[53:00] actually, because I said that there are two paths we are considering right now, and we discussed the second one, which is quite actionable, but we skipped the first one a little bit. Maybe we can do it very briefly; I just want to hear if anchors have some specific thoughts on it. And this is: if we were to do the direct sending, the direct payments, directly from an anchor to another anchor, we would basically just step in- we would be the party initiating the whole flow for the user, and we would just connect the dots- and the user would then directly send a payment, a fiat payment, for example, to the sending anchor, and + +[54:00] the sending anchor, upon arrival, sends it on to the receiving anchor. It's a bit hard to explain in such a short time, but are there any hard feelings from anchors about adding support for something like this, or not adding it- that you allowed some other party to kick off the flow in this process? Does the sending anchor need to know anything about what's happening, or does it simply accept deposits and issue tokens like an anchor has always done? Yes, basically. Of course, the anchor still needs to control the KYC flow, request more data, something like that, I suppose, but apart from that, it's like it's always + +[55:00] been. Okay. So what I had understood from your flow there was: the wallet itself would be the initiator of the transaction. And from a receiving anchor's point of view, they care about two things: one, we have good KYC data, we know who we're dealing with; two, we've received proper payment- we received the tokens we expect to receive for this payment. Originally those were kind of conflated, where the person who provides the KYC is the same as the person that sends the tokens. I don't think that necessarily needs to be true, as long as both things are provided: good KYC data and proper payment. Do those actually need to be connected? + +[56:00] If SatoshiPay can provide KYC, either through Inclusive or through however the anchor accepts it, and the receiving anchor gets the money they expect, is that enough? Because, if that's the case, the sending anchor doesn't really need to know that they're part of this payment protocol; they just need to know they've accepted some fiat and sent a token to an address. Just to say: we do that right now with some of the crypto anchors, like Papaya and NaoBTC. What exactly do you mean- what is it that you do? Essentially, we sell the client, say, a NaoBTC Bitcoin, and then they choose to send it to a Bitcoin address; they're doing a withdraw on that anchor, but it's + +[57:00] behind the scenes- the client doesn't know. Okay, that doesn't sound too bad. If it's an address that's whitelisted, we don't really care whether it's a client of the partner or whatever the case; same thing, as long as we receive a fiat payment corresponding to a transaction, we don't really care, as long as it's right. The only question, I think, really is: do we have to connect the incoming + +[58:00] fiat payment with the KYC of a particular wallet address? If we have to do that, it is also possible. I think it can be done, because, first of all, we already
have a business relationship and a business agreement with SatoshiPay, for example, and then there's going to be whitelisting of the wallet's or the end users' addresses. So, as long as we move through that process, when any send/receive transaction comes in, we know that it's one that was launched through SatoshiPay, because the address has been whitelisted and we already have KYC for it. So, yeah, it sounds overly complicated to expect that a sending anchor who's not actually originating this payment is going + +[59:00] to become a part of it, but it seems totally straightforward for them to issue the tokens to the wallet, and then the wallet sends the money, and in that case it's coming from an address that's been whitelisted and has been registered in KYC properly. Yeah, that's maybe the most straightforward thing to do. Cool. I had one question on this external memo field, for SatoshiPay, because I've seen before that wallets who are initiating transfers often need their own reference to come in. So essentially there are two memo fields: an external memo, and then our memo. Can you explain a bit how you see that working? + +[01:00:00] Oh yeah. So, from our point of view- you're referring to the external memo and the Stellar transaction memo, right? Exactly. So we don't really care about the Stellar transaction memo, but on payout we would like to be able to add a custom text to the actual bank transfer reference, the text that's attached to those bank transfers. We know that, of course, you guys have to add your own text for referencing it later, I guess, but our goal is to also include some text or some code of ours, so that the receiving party can see it. + +[01:01:00] The text that we want to add is primarily specified by our client. So if, Anthony, you were our client in this case, you could enter some ID- say your use case is "payment X"- and you would enter that in our wallet, and we would send it to Tempo on withdrawal, on payout, and then the idea is that Tempo adds this "payment X" memo, which you, Anthony, as the user, specified, to the actual bank transfer text. Great, thank you. Okay, awesome. So if that is one of our last trailing comments, then I'm going to go ahead and wrap this session up. I + +[01:02:00] think we've had a lot of great discussions. I'm really excited about the progress we made on the multilateral KYC conversation- so thank you, Claudio- and also on the non-custodial wallet integration into the direct send transaction, so lots of stuff there. I know that there were a few points that we are going to need to follow up on; that's the nature of these discussions, so things will continue to be dynamic. But thank you so much, everyone, for your time and participation today, and thanks to everybody who is tuning in. Thank you, thanks, all right. + +
diff --git a/meetings/2020-05-08.mdx b/meetings/2020-05-08.mdx new file mode 100644 index 0000000000..2293bb3079 --- /dev/null +++ b/meetings/2020-05-08.mdx @@ -0,0 +1,87 @@ +--- +title: "Engineering Talks - Intuitive Stellar Consensus Protocol" +description: "This overview highlights how SCP works end-to-end—quorum sets, blocking sets, federated voting, and how nomination + ballot narrow the network to one ledger value." +authors: [marta-lokhova] +tags: [tutorial] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Marta Lokhova walks through the Stellar Consensus Protocol (SCP) in plain language, aiming to give newcomers a mental model before they dive into formal specs. She starts by defining quorum sets (who you trust), thresholds (how many of them must agree), quorum slices (subsets that meet the threshold), and blocking sets (the smallest sets that can prevent you from moving forward). + +With those primitives, she introduces **statements** as SCP’s basic unit of agreement (e.g., “I propose transaction set X for ledger N” or “I’m ready to apply transaction set X”). She then explains **federated voting**—how nodes progress from _vote_ → _accept_ → _confirm_ on a statement—and uses a step-by-step demo to show how blocking sets can force nodes that initially voted differently to converge on the same outcome. + +Finally, she frames SCP as a **funnel**: the **nomination protocol** gathers and confirms a candidate set of values (transaction sets), while the **ballot protocol** _prepares_ and then _commits_ a single value. Nomination continues in the background while ballot runs, letting the system start optimistically but still converge safely if the network needs to adjust. + +### Key Topics + +- Clear definitions of quorum sets, thresholds, quorum slices, and blocking sets—and how each node’s configuration shapes safety and liveness. +- Federated voting mechanics (vote/accept/confirm) and why “accepting what your blocking set accepted” prevents deadlock. +- SCP’s two-phase structure: nomination to build a candidate set, then ballot to prepare/commit one value. +- Why ballot can start as soon as a node confirms its first nomination, even while nomination continues (optimistic progress with safe fallback). +- Practical guidance on thresholds: aiming for ~67% across organizations, while internal org nodes may use lower thresholds; recommendation to use Stellar Core’s automatic quorum generation. + +### Resources + +- Blog Post: [Intuitive Stellar Consensus Protocol](https://stellar.org/blog/developers/intuitive-stellar-consensus-protocol) + +
+ Video Transcript + +[00:00] All right, hi everyone. My name is Marta and I'm a software engineer on the Stellar Core team, and today I'm here to talk to you about the Stellar Consensus Protocol. We're briefly going to go over the agenda. First we're gonna have some motivation for this talk: why are we even doing this, and what are the things that we're going to cover? We're then going to have a quick recap on quorums and what they mean in the context of the Stellar network. We will talk a little bit about statements, which are the building blocks of the Stellar Consensus Protocol, and then we will see how statements are used in the process + +[01:00] called federated voting. We will have a demo for federated voting to see how it all plays out. And then, finally, the two steps of the Stellar Consensus Protocol, which are the nomination protocol and the ballot protocol, will be the last thing that we cover. All right, let's start with motivation. Why are we even doing this? Distributed consensus protocols are difficult to understand. There are a lot of steps, a lot of things to keep in mind, and if you're new to the protocol and just trying to get an understanding of it, it can be quite overwhelming to read through all those materials and try to understand everything at once. We also realized that there is quite a bit of advanced material out there and a lack of simplified versions. So this talk will not have any complex theorems, no math- just the high-level ideas to help you get the right understanding of the Stellar Consensus + +[02:00] Protocol. And we were inspired by physics: in physics you don't go directly into complex formulas. You first get a high-level understanding of what's going on; you try to imagine something before you actually go and work out the formulas and dive into more complexity. With that, we're going to briefly go over quorums. You might already be familiar with quorums, but we're going to go over a few important definitions here. A quorum set is a set of nodes that you trust. For example, if you look at the diagram on the right, we have four nodes- A, B, C, and D- and you can look at the arrows: the arrows represent the quorum sets. So, for example, node A has B and C in its quorum set. Another example: node B has three + +[03:00] nodes in its quorum set- A, C, and D. We also have the notion of a threshold, and a threshold is the minimum number of nodes in one's quorum set that must agree. For example, if we go back to our node B, remember that it has nodes A, C, and D in its quorum set. Suppose that the threshold for B is 2, which means that any combination of 2 nodes would be sufficient for B to proceed. And in this case, like I was saying, it can be any combination: it could be AC, CD, or AD. Those subsets- AC, CD, and AD- are called quorum slices. And finally, the most important definition is the definition of a quorum: a quorum is + +[04:00] a non-empty set of nodes that contains a slice for each member. So, in our example on the right, let's try and see if we can get a quorum starting with node A. Remember that A has B and C in its quorum slices, so we add A, B, and C to a quorum. But ABC is not a quorum on its own, because B and C also have D in their quorum slices. So we have to add D to the quorum, and then, altogether, A, B, C, and D is a quorum.
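A small sketch can make these definitions concrete: each node names its quorum set and a threshold, and a candidate set of nodes is a quorum if it contains a slice for every member. The configuration below is illustrative, not the exact one from the slide:

```ts
// Each node lists its quorum set and a threshold. A candidate set of
// nodes is a quorum if it contains a quorum slice (at least
// `threshold` members of the quorum set) for every one of its members.
interface NodeConfig {
  quorumSet: string[];
  threshold: number;
}

const config: Record<string, NodeConfig> = {
  A: { quorumSet: ["B", "C"], threshold: 2 },
  B: { quorumSet: ["A", "C", "D"], threshold: 3 },
  C: { quorumSet: ["B", "D"], threshold: 2 },
  D: { quorumSet: ["B", "C"], threshold: 2 },
};

function isQuorum(candidate: Set<string>): boolean {
  if (candidate.size === 0) return false;
  for (const node of candidate) {
    const { quorumSet, threshold } = config[node];
    const present = quorumSet.filter((n) => candidate.has(n)).length;
    if (present < threshold) return false; // no slice fits inside candidate
  }
  return true;
}

console.log(isQuorum(new Set(["A", "B", "C", "D"]))); // true
console.log(isQuorum(new Set(["A", "B", "C"]))); // false: B's slice needs D
```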
Another very crucial definition on the Stellar network is the notion of blocking sets. Any set of nodes in a node's quorum set without whose agreement the node cannot reach consensus is a blocking set. And intuitively, if you think about the name, the blocking set blocks consensus: if you do not see agreement from your blocking set, then you may not proceed in the consensus round, so you're blocked. + +[05:00] A small example here: if we have a quorum set of four nodes, and suppose we have a threshold of three- so we require three out of four to agree- then any two nodes will be blocking. And it sort of makes sense: because we require three out of four nodes to agree, if we have two nodes that are not in agreement, then they're blocking us and we cannot proceed and reach consensus. Another important thing to note is that the concept of a blocking set is per node. It's per node because every node gets to select its own quorum set, and so the blocking set for each node will be different depending on what its quorum set is. Going back to our example, let's look at node C. C has A, B, and D in its quorum set, and let's say that C went with the most aggressive threshold, three out of three: everybody must agree. So any node + +[06:00] in C's quorum set can be blocking. For example, A is a blocking set because C's threshold is three out of three, so as soon as A does not agree, C is blocked. Similarly, B on its own is a blocking set, and D on its own is a blocking set, and any combination of those three nodes is also a blocking set. But here we're just looking at the smallest blocking sets. All right, moving on to statements. What is a statement? A statement is the smallest building block of the Stellar Consensus Protocol. It's basically something that the network wants to agree on. For example, when a node says, "I propose this transaction set for ledger 5,000," that's a statement. Or, similarly, if the node says, "I am ready to apply this transaction set for ledger + +[07:00] 5,000," that's also a valid statement. The second one here is a little bit stronger, because at this point the node is actually ready to apply the transaction set. But nevertheless, both are valid statements, and both are valid for the network to agree on- which naturally leads me to my next point: how does the network know if a statement is actually agreed on? It votes on statements, using a procedure called federated voting. Before we dive into the rules of federated voting, it's important to see how a node perceives a statement when it receives one. A node decides what it wants to do about a statement depending on what its quorum set says about that particular statement. For example, a node may say four things + +[08:00] about a statement. It may say: I don't know anything about statement A, and I have no opinion whatsoever. It can also say: I received A and it's valid, and I vote for A, but it's still not quite safe to act on it yet. Voting is basically a weak form of agreement, where a node says, yes, I vote for A because I validated it, but we still don't know, network-wide, what the state of A is. A node may also say: I accept A, if it has seen enough support in its quorum slice for this particular statement A. But at that point the node still doesn't know if it's safe to act on it yet. And then finally, a node might say: I confirm A.
At that point it is safe to act on A: even if every node in a quorum slice has not yet confirmed A, they will not be able to confirm anything else but A. This + +[09:00] means that there is final agreement on statement A in the quorum slice, and at this point a node may say: I confirm A. So these are the various steps that a node may take regarding a particular statement A. But let's actually go over the rules and see how these transitions happen. How does a node vote for something? How does it move to accept and confirm? Here are the rules of federated voting. A node may vote for A if A is valid and consistent with its previous votes. A node may accept A if either every node in its quorum slice voted for or accepted A, or the node's blocking set accepted A. Let's look at that second rule a little closer. Why do we have this + +[10:00] rule that a node may accept A if its blocking set accepted A? Remember the definition of the blocking set: without its agreement, it is impossible to reach consensus. So a node has two choices: it can either be stuck, because its blocking set accepted something else, or it can just go ahead and accept what the blocking set accepted. And this is why we have the rule that a node may accept A if it sees that its blocking set accepted A. But you might wonder: what happens if the node voted against A before? In that case, the node may just forget its previous vote and accept A. Note that this does not mean the node votes for A: it can accept A without voting for it. It could have voted for something else, but because it sees the agreement of its blocking set, it also accepts A. And then, finally, a node + +[11:00] may confirm A if every node in its quorum slice accepted A. All right, a little demo here to see how those federated voting rules actually work in an example. At the top right corner are the same federated voting rules that we just covered on the previous slide; I put them there like a little cheat sheet so we can always refer to them. Here we have a network of seven nodes, and, similarly to our previous example, the arrows show the quorum sets selected by each node. I want to focus here on the dependencies of node 5. Node 5 has 6, 7, and 1 in its quorum set, and that's important because of the definition of the blocking set. For this particular + +[12:00] example, let's assume that the threshold is the most aggressive: basically, all nodes in the quorum slice must agree for a node to proceed. So, for node 5, node 1 on its own would be a blocking set, node 6 on its own would be a blocking set, and likewise node 7. Right, let's move on to voting. Let's say that two different nodes on the network decided to vote for different things, different statements: 3 voted for X, and 6 voted for Y. Those nodes send out messages saying, hey, I voted for X, or, hey, I voted for Y. Let's see what the other nodes do when they receive those messages. Nodes 1, 2, and 4 receive the vote for X from 3, and they have 3 in their quorum sets, so they validate X- no + +[13:00] problem with that- and they go ahead and vote for X. On the left side, nodes 5 and 7 receive messages from 6 saying that 6 votes for Y. They validate Y, no problem, and they also go ahead and vote for Y. Now let's refer to our second rule of federated voting: accept if your quorum slice voted or accepted.
So let's see what we have on the right side: every node voted for X, so, as those messages get through, nodes may proceed and accept statement X. We can see that 1, 2, and 3 accepted X- and maybe some messages are delayed and 4 hasn't accepted X yet, but it's just a matter of time before the messages reach 4. Now, something really important is happening here. If we look again at our node 5, + +[14:00] remember that 1 is a blocking set for 5, and 1 just accepted X while 5 voted for Y. Let's look at our rule number three, which is: accept if your blocking set accepted. At this point, 5 sees that its blocking set accepted X, so it just goes ahead and accepts X as well. And we can also see that for node 4 the messages finally went through, and 4 accepted X as well. What about our nodes 6 and 7? They also see that 5 accepted X, and 5 is a blocking set for 6, and 5 is also a blocking set for 7. So, referring to our rule number three again- accept if your blocking set accepted- 6 and 7 can accept X as well. At this point + +[15:00] everybody accepted X, so all the quorum slices accept X, in which case we can proceed to our rule number four, which is: confirm if your quorum slice accepted. Everyone's quorum slices on the right accepted X; everyone's quorum slices on the left accepted X. So everybody can go ahead and confirm X, and at this point it is safe to act on it. Great. Now that we've covered statements and federated voting, let's actually see how those are applied in the Stellar Consensus Protocol. SCP is a two-phase protocol that uses federated voting to vote on statements, and statements contain values. For the Stellar network, the values are transaction sets. The first step of the Stellar Consensus Protocol is the nomination protocol, + +[16:00] which selects values to consider for a particular ledger; it selects multiple transaction sets to consider for the ledger. The second step is the ballot protocol, and what it does is actually attempt to commit those values which were selected during the nomination protocol, and it does so in two steps: first it prepares values, and then it commits values. All right, time for another diagram. We can think of the Stellar Consensus Protocol as a funnel. At the very beginning, a node doesn't know anything about the state of the network. It does not know what its quorum slices agreed on; it's uncommitted; it may really vote for anything. It then goes into the first step, which is the nomination protocol, where it selects a few valid values that it wants to consider. It + +[17:00] goes through the nominate phase and selects a candidate set. Once it's selected a few nominations, it goes into the prepare state, where it attempts to prepare those values. Alternatively, if it gets stuck, it can also accept what its blocking set accepted and proceed with that value. After prepare is done, nodes go into the commit phase, where they attempt to commit the prepared values- or they can again accept what the blocking set accepted. And then, finally, at the very end, once the commit phase is done, we know that the network has agreed on a single value, so everybody applies that single value and consensus is reached.
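The transitions from the demo can be condensed into a tiny state machine, sketched below. This is only a per-statement sketch of the vote/accept/confirm rules; the boolean inputs stand in for the real message bookkeeping, which an actual implementation has to track:

```ts
// Per-node, per-statement federated voting state, following the rules
// above. The three booleans are assumptions of this sketch, standing
// in for real vote tracking across received messages.
type VoteState = "unknown" | "voted" | "accepted" | "confirmed";

function step(
  state: VoteState,
  quorumSliceVotedOrAccepted: boolean, // rule 2
  blockingSetAccepted: boolean, // rule 3
  quorumSliceAccepted: boolean, // rule 4
): VoteState {
  // Rule 4: confirm once every node in the quorum slice accepted.
  if (quorumSliceAccepted) return "confirmed";
  // Rules 2 and 3: accept on slice agreement, or follow the blocking
  // set even if that contradicts an earlier vote for something else.
  if (
    state !== "accepted" &&
    (quorumSliceVotedOrAccepted || blockingSetAccepted)
  ) {
    return "accepted";
  }
  return state;
}

// Node 5 from the demo: it voted for Y, then saw its blocking set
// (node 1) accept X, so it accepts X despite its own earlier vote.
console.log(step("voted", false, true, false)); // "accepted"
```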
So, if you were to walk away from this talk remembering just one thing, this is the diagram that I want + +[18:00] you to remember: at a high level, this is how the Stellar Consensus Protocol works. It basically narrows down all the possible values, and at the end you end up with a single value which everybody agrees on. And you've probably guessed that the yellow step is what the nomination protocol does, and the orange step is what the ballot protocol does, for the prepare and commit. All right, let's get into a little more detail about what the nomination protocol actually does. The nomination protocol is essentially federated voting on statements such as: nominate transaction set C. A node may vote for any transaction set during the nomination phase, as long as it's valid. Once the node goes through federated voting on a particular nomination + +[19:00] statement- let's say that nomination got confirmed- the node may add that value into the set that we call the candidate set, which is basically just all of the confirmed nominations. There's an important invariant here: a node stops voting for new values as soon as it confirms its first nomination. The node may still accept and confirm values that it sees its blocking set accepting or confirming, but it may not vote for new values. We will discuss a little more why this is actually really important in a later slide, but for now just remember that there is an important invariant here: we do not vote for any new values once we confirm something. And then, finally, when we have those candidates, those confirmed nominations, we may select a single value, which we call the composite value, and at that point we can start the ballot + +[20:00] protocol on that value. In the Stellar network, nodes select the biggest transaction set as the composite value: if you have several transaction sets in your candidate set, you just go by whichever transaction set has the most transactions. All right, let's go over the ballot protocol. The ballot protocol, similarly to nomination, is a procedure of federated voting on statements, and there are two types of statements: prepare transaction set X, and commit transaction set X. Prepare verifies that everybody is willing to commit this value- that everybody is okay with committing it- while commit ensures that everyone actually commits the value, at which point it is safe to act on the value. The important thing here is that nomination keeps running in the background even + +[21:00] while the ballot protocol is running. You might have started the ballot protocol on a particular composite value, but nomination never really stops, so it may update the composite value in the background; we will see why this is important in the next slide. All right, so why exactly do we start the ballot protocol as soon as we have a confirmed nomination? Why don't we wait for nomination to finish? We know that eventually all nodes will converge to the same candidate set through nomination. And why do we know this? We discussed the important invariant a few slides ago: the nomination protocol stops issuing any new votes as soon as it confirms its first nomination. Remember that it can still accept or confirm whatever its blocking set accepted or confirmed, but it may not vote for anything new. Because of that property, we can guarantee that + +[22:00] eventually all the nodes on the network will converge to the same candidate set through nomination.
But there's a problem: we don't know when that's gonna happen, and this is why we start the ballot protocol optimistically, as soon as we have our first confirmed nomination. But you might be wondering: can it hurt safety? Is it actually safe to do it this way? Is it possible that the network will end up with different values? Actually, no, this is pretty safe, and there are two scenarios here. Best-case scenario: let's say we started with an optimistically prepared value, the network actually agrees with it, and everybody ends up committing that optimistically prepared value. That's the happy path. Well, what happens if things don't go so well? Worst case, a node might try to prepare a value but get stuck, because it doesn't see agreement from the other nodes + +[23:00] on the network, and so it would time out. After it times out, it can either retry with a new composite value- remember that nomination is running in the background and updating the composite value, so we might actually end up with a new composite value, and it can retry with that- or, if it sees that its blocking set made more progress and accepted or confirmed something, it can just switch over and accept or confirm whatever the blocking set accepted or confirmed. All right. So this was a very brief discussion of the Stellar Consensus Protocol. I also encourage you to check out the blog post that we have. There is another demo in the blog post which actually shows how federated voting works with both the nomination and the ballot protocol, and it's very detailed, so I definitely encourage you to take a look. But for now, we're going to go over the summary. Remember that a statement + +[24:00] is a building block of consensus, and SCP uses federated voting to agree on different statements. Nomination is essentially federated voting on statements to select a small set of values to consider for a particular ledger. The ballot protocol is federated voting which tries to prepare and commit those values which were selected in nomination. And, most importantly, SCP acts like a funnel, narrowing down the possible values for nodes to commit. And with that, I want to see if there are any questions. All right: do I have a choice of threshold value for my quorum slices? Yeah, that is a very good question. You do, but the selection of threshold really depends on how many failures you're willing to tolerate. Generally we suggest that if, for example, you + +[25:00] add different organizations into your quorum set, you want a threshold of at least 67%, so that you can tolerate failures. But if you have nodes within your own organization, then you can select a lower threshold: if you are running three nodes and they're all within the same organization, you can have just a simple-majority, 51 percent threshold. But yes, short answer: you get a choice when you configure your Stellar Core node. You may configure your quorum sets however you want and select whichever thresholds you want. But we generally recommend using the automatic quorum generation mechanism that we have built into Stellar Core, because it accounts for that 67%, and it looks at nodes within the same organization and chooses a simple majority there. So we definitely recommend going that route, because this way you're making sure that your configuration is actually safe and cannot fork.
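The 67% guidance is the classic two-thirds rule: to tolerate f failed organizations out of n = 3f + 1, you need agreement from 2f + 1 of them. A quick illustration of the arithmetic (the helper is just for this example; in practice the automatic quorum generation handles it):

```ts
// To tolerate f failures among n = 3f + 1 organizations, require
// agreement from 2f + 1 of them (~67%). Within a single organization,
// a simple majority (51%) is typically enough.
function orgThreshold(orgCount: number): number {
  return Math.ceil(orgCount * 0.67);
}

for (const n of [4, 7, 10]) {
  const t = orgThreshold(n);
  console.log(`${n} orgs -> threshold ${t}, tolerates ${n - t}`);
}
// 4 orgs -> threshold 3, tolerates 1
// 7 orgs -> threshold 5, tolerates 2
// 10 orgs -> threshold 7, tolerates 3
```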
+ +[26:00] Great, any other questions? All right, well, thank you guys so much, and thank you for watching. + +
diff --git a/meetings/2020-05-22.mdx b/meetings/2020-05-22.mdx new file mode 100644 index 0000000000..0307542a64 --- /dev/null +++ b/meetings/2020-05-22.mdx @@ -0,0 +1,78 @@ +--- +title: "Engineering Talks: Kelp GUI" +description: "This overview highlights Kelp GUI v1—how it simplifies launching and managing market-making bots on the SDEX, from bot creation and price feeds to advanced safeguards and clean shutdown." +authors: [nikhil-saraf] +tags: [tutorial] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Nikhil Saraf introduces **Kelp GUI v1**, the desktop interface for Kelp—an open-source trading bot originally built for the Stellar decentralized exchange (and later extended to many centralized exchanges via the CLI). The focus of the talk is how the GUI lowers the barrier for non-technical liquidity providers by turning configuration-heavy market-making into a guided workflow: generate a starter bot, or create a custom one by selecting assets, setting spreads and levels, and wiring up price feeds. + +He explains how pricing in the **buy/sell strategy** works by composing a numerator and denominator feed (for example, valuing XLM via an exchange feed while treating a “coupon” asset as a fixed $1). The GUI displays the derived mid price and lets users define multiple “levels” around that mid, which translate into ladders of bids and asks. A live demo shows creating two bots on testnet, connecting a fiat price feed (Currencylayer), and using “advanced” price adjustments (offsets) to intentionally misprice one bot so it trades through the other—illustrating how bots place, cross, and refresh offers over time. + +Nikhil also emphasizes **operational hygiene**: if you quit without stopping a bot, its offers can remain on the orderbook, potentially filling unexpectedly. Stopping a bot should both halt the process and cancel outstanding offers so the market doesn’t retain stray bids/asks after you’re done. + +### Key Topics + +- What Kelp is (CLI + strategies in Go) and why the GUI matters for first-time market makers on the SDEX. +- Bot setup workflow: secret key handling, asset/issuer selection, and the “starter bot” vs custom configuration paths. +- Price feeds and derived pricing: composing numerator/denominator sources, sanity-checking quotes, and understanding how the bot refreshes prices on an interval. +- Market-making controls: spreads, multi-level ladders, order sizing, and how thin/no markets affect displayed spread. +- Advanced safeguards and tuning: correction knobs (offset % / offset), multipliers, and preventing bad quoting when price data drifts. +- Safe shutdown: stopping bots to cancel offers so you don’t leave live orders behind. + +### Resources + +- [GitHub PR / Discussion referenced](https://github.com/stellar/stellar-protocol/pull/592) + +
+ Video Transcript + +[00:00] Today we're going to be talking about the Kelp GUI, where anyone can create crypto markets. My name is Nikhil Saraf and I'm one of the developers at the Stellar Development Foundation. We're going to go over what Kelp is and what Kelp GUI version 1 is; we're going to see some screenshots, then go into a demo, and then have some time for questions later on. So what is Kelp? Kelp is a trading bot that has a command-line interface. It was built for the Stellar decentralized exchange, and later on we added support for over 100 centralized exchanges. You can customize existing trading strategies with configuration files; today we have four trading strategies. You can create new trading strategies by + +[01:00] writing your own code. Kelp is written in Golang, so any trading strategies that you write would need to be written in Golang today, with the possibility of supporting more languages in the future. We open-sourced Kelp in the summer of 2018. The Kelp GUI is a relatively newer addition to Kelp. It is a GUI to manage multiple Kelp bot instances. Today it does not work on centralized exchanges and only works on the Stellar decentralized exchange. We start off with just one trading strategy, to make it easier for people to learn how the UI works. You can't create new trading strategies today, but in the future, any trading strategies that are created on Kelp will automatically be supported by the UI. In addition to writing the back end for the UI in Golang, we added support for React for the front end. + +[02:00] The UI was open-sourced in the summer of 2019, and only last month did we release the first release candidate of the UI. We have an integration with the CCXT REST server, which gives you native access to data from all of those 100 exchanges directly in the UI, without any additional installations. You can download it as a desktop application, and very soon you will be able to run it in a hosted testnet environment as well. So this is what Kelp looks like when you first load the application. You have two options. The first one is to auto-generate your first test bot. This is for people who are first-time users of Kelp, and it gives them a starter bot that they can use. We have a video that we uploaded with the blog post + +[03:00] announcement of the Kelp GUI that walks you through creating your first test bot through auto-generation. The second option is to create your own bot, which gives you a little more control over the parameters of your bot. On the top left, you can see the version being used for this UI, and you also have an option to quit the application when you're done using it. This is the screen to create a new bot or to edit an existing bot. You enter your secret key over here, and you can see the corresponding public key. You enter the assets that you want to trade. For XLM you don't have an issuer, but for all other assets you have an asset issuer. COUPON out here is just a token asset that we use; it could represent anything. It is just a test market. + +[04:00] This is the price feed section for the buy/sell strategy, which is the first strategy that you are allowed to use in the UI. The price feed allows you to set the prices for the base asset and the quote asset described on the previous page. The base asset is always set as the numerator.
In this case, because our base asset was XLM, we've chosen to price it with the XLM/USDT price from Binance, under the centralized exchange section, and you can see a sample price out here in the screenshot. For the denominator, we're going to price the COUPON asset as one unit of US dollar, so we use the fixed value of 1.0. Again, COUPON here can represent any asset; for the purposes of this video and this demo, we're going to be treating it as a US dollar. + +[05:00] This box out here tells you what the final calculation of your price is; you can see how the price is derived. For the XLM/COUPON market, we take the price of the numerator divided by the denominator, and then we get the final price. The sentence makes it easier to understand how you are pricing the asset, which we will go into in more detail in the demo. At the end here, we have some levels that tell you how you can price the different levels that spread out from the center mid price, which has been set over here to 0.06192. Once you've created a few bots, this is how your Kelp application, or your Kelp forest, is going to look. We have the first auto-generated bot, George + +[06:00] the auspicious octopus. We've created two more bots: Harry the harmless habuka, and Jerry the unsightly shrimp. If you take a look on the left side, you can see the markets and the balances of these bots for those tokens. So George has about 9,799.99 lumens and 1.0 COUPON tokens. Harry is trading in the Bitcoin/USD market, and Harry has no balance in either of those assets. And Jerry is trading in the lumen/BTC market and has close to 10,000 lumens. The spread for both Harry and Jerry is negative one, because no market exists, whereas George is currently running and has created a market with a spread close to 0.20%. This spread out here does not necessarily indicate the spread that + +[07:00] George is creating, but it is the spread of the market. You can also see how often the bot is updated. This is a check on the last time that we got information about the spread and the bids and asks for this given bot; this typically runs about every 5 seconds by default for the UI. If you click on the three-button menu over here, you have the option to see the offers and the market for this given bot, and also to edit the bot if it is in the stopped state. You could also delete the bot if you wanted. Let's go ahead and see a demo of how the bot works. + +[08:00] So I've started up the bot on my machine, and we see the first screen out here. I'm going to go ahead and create the first bot manually. So we have Madison the glamorous sawfish. Let's create a new secret key. The bot will automatically create a new secret key for us and create the relevant trustlines, so we don't have to worry about that. Let's trade in the USD and EUR market. Now, we are trading on the test network out here and, for simplicity, let's use the default asset issuer that we have. So I'm going to paste that in both of these token issuer fields. Now, one advantage of using the default asset issuer is that the bot is going to create + +[09:00] and fund the account with 1,000 units of each of these tokens. Now, if we go to the advanced settings, we can see how often this bot runs. By default it runs once every 60 seconds; for the purposes of this demo, let's change this to 6 seconds. We also have a randomized delay, which by default is set to 15 seconds. Let's set that to zero for now.
+There are some more advanced settings out here which I won't go into in detail for this video. When we want to price this market, let's try and set the best price for the USD token. Now, because this is a dollar and we're generally using dollar-denominated values, we can actually just use a fixed price of one.
+
+[10:00] However, for the denominator, we're pricing the euro token, and for that we actually want to use the price of the euro. So let's go to fiat, which uses the currencylayer API. Let's select euro. Now, you notice that it says that the price is missing. It also has an error message out here that says that we have an invalid API key- of course, because we don't have an API key out here. So I've signed up for a free account with currencylayer, and I'm going to go ahead and use that key. I'm going to refresh this price, and, as you can see, the price is updated. It's always good to do a sanity check on the prices that you get over here, because these are the prices that the bot is going to see when it's running on a continuous basis. 1.0888
+
+[11:00] as a price for the euro makes sense, because a euro is worth more than a dollar. Now, one thing to note about these price feeds is that this price that you see out here is not a price that is hardcoded into the bot. The bot is going to run the same query at every time interval that it runs, which we previously set to 6 seconds. So it's going to get a price that moves with the market, as you have set for this price feed. So this is a very active and dynamic price feed. As you can see, we run the calculation of taking the net price that we will get, which is 1 USD divided by 1.0888 for the euro price, which gives us 0.91844. The sentence helps explain that: you are valuing one unit of the US dollar at 0.91844 units of a euro. This makes sense,
+
+[12:00] because a euro is worth more than a US dollar. Now we come to the levels. By default, the bot starts out with four levels. Why don't we change these amounts to be 50, just for the purposes of this demo? And it's being set at a spread value of 0.1 from the mid price, which means that the bid-ask spread is 0.2; here, 0.3; here, 0.4; and 0.5. So this bot is now in the process of initializing, which means that the application is creating the necessary trust lines and issuing the relevant assets to the new secret key account
+
+[13:00] that we created. This usually takes a little bit of time, and it seems like we're done. So, as you can see, we have a thousand units of USD as well as the euro, and if we hover over this info icon, we can see the asset issuer. And the spread, of course, is negative one, because we don't have a market yet. Now let's create a second bot that is going to compete with the first bot, before we start any of them. So we're going to follow the same process, but I'm just going to do this a little quicker. We're going to set this to be around the same time interval, just for the purposes of this demo.
+
+[14:00] Running our sanity checks, we get about the same price, so that's right. We can just leave this at 100; that's fine. Now, we want these bots to compete with each other. Normally we would not force these bots to compete, but I'm trying to show a specific example out here, which allows the second bot to purchase all of the US dollars from the first bot. So I'm intentionally mispricing the second bot.
+
+[15:00] I do that by adding a 0.05 (5 cent) offset to this bot's price.
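+
+The effect of that offset on the feed price is simple arithmetic. Here is an illustrative sketch (not Kelp's actual implementation) using the numbers from this demo:
+
+```js
+// offsetPercent shifts the feed price proportionally, offset shifts it by a
+// fixed amount; both can be negative.
+function adjustFeedPrice(feedPrice, offsetPercent, offset) {
+  return feedPrice * (1 + offsetPercent) + offset;
+}
+
+// The first bot, priced correctly: no adjustment.
+console.log(adjustFeedPrice(0.91844, 0, 0)); // 0.91844
+
+// The second bot, intentionally mispriced with a +0.05 fixed offset.
+console.log(adjustFeedPrice(0.91844, 0, 0.05)); // 0.96844
+```
+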
+Now, usually this advanced settings section is there to correct any prices that I might see. So, for example, if I was to go to the Google website and find a price for a token, and that's the one I chose to use, or if there was a price that my bank was giving me, and I wanted the currencylayer price feed to come closer to that- we have a way of adjusting the price feed once we receive it, by setting the offset percentage, which can be a negative value, and the offset as well, which can be a positive or negative value, and this allows us to make any corrections in the price that we get. So in this video I'm misusing this advanced setting intentionally, just for the purposes of this demo. Let's go ahead and create this bot.
+
+[16:00] So Madison was the first bot that we created and Joshua was the second one. If I click on show market, it's going to open up the screen in a second window. Let's see that. Now you can see we have the USD/euro market for the same token issuer, but there's no market. Let's start the first bot, which is Madison. This is the one that is priced correctly. What we're going to see is that there are some bids and asks placed by this bot, so you can see that the bot is pricing this asset at close to 0.918.
+
+[17:00] Now let's start the second bot. What we're going to see is that this thousand-USD balance is going to get drained out and the euro balance is going to increase for Madison, whereas Joshua is going to see an increase in the USD balance and a decrease in the euro balance. So what is actually happening is that Joshua is being priced higher- if you look at the mouse cursor on the right side over here- and the bids of Joshua are hitting against the asks of Madison, which are then crossing over, and then we get some trades. So this is being run once every 6 seconds, so pretty soon we will see the entire amount getting drained. You can also see out here that we have
+
+[18:00] two additional bids at 0.96, which is higher than Madison's bids. The reason is that Joshua now has enough dollars to be placing these bids, but Madison is not able to consume them, so Joshua's bids then lie on the orderbook. So it looks like we're about done here. Madison has 0.2 left, and Joshua has pretty much the entire amount of the 2,000 USD initially allocated for these bots. You can also see that the bids and the asks have changed depending on how these orders were placed and consumed.
+
+[19:00] Now, getting back to the Kelp window. So we are now ready to stop these bots and quit the application. But before we quit the application, we should be very careful to hit the stop button, because if we don't do that, then, although the bots will be stopped, these orders- one bid and four asks, and four bids and zero asks- would still remain on the orderbook, which would then potentially see some trades happening without us having expected it. So it's always important to stop these bots. Let's go ahead and do that. In the process of stopping these bots, the application kills the bots as well as deletes the offers associated with each bot. As you can see, these bids and asks have gone to zero, and the spread has gone back to negative one, as you can see in the window.
+
+[20:00] So, in summary, we went through what is Kelp, what is the Kelp GUI version 1, some improvements that we will have in the UI, the screenshots, and a demo of how you can create two bots in the UI to trade against each other. Let's go into some questions.
+
+[21:00] Thank you everyone for attending this session, and I hope that you all learned something about the Kelp GUI today. Thank you.
+
+
diff --git a/meetings/2020-06-04.mdx b/meetings/2020-06-04.mdx new file mode 100644 index 0000000000..0850f542d2 --- /dev/null +++ b/meetings/2020-06-04.mdx @@ -0,0 +1,183 @@ +--- +title: "Protocol 14 CAPs: Claimable Balances and Sponsored Reserves" +description: "The first livestreamed Open Protocol Discussion reviews proposed Core Advancement Proposals for Protocol 14, focusing on claimable balances, sponsored reserves, and process improvements for evaluating and rolling out protocol changes." +authors: + - david-mazieres + - eric-saunders + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - tom-quisel + - tomer-weller +tags: + - legacy + - CAP-18 + - CAP-23 + - CAP-31 + - CAP-33 + - SEP-8 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Justin Rice opened the first livestreamed Open Protocol Discussion with an update on Protocol 13, explaining why the validator vote was delayed by two weeks to give exchanges additional time to complete testing. That operational context set the tone for the rest of the call, which focused on how protocol changes are evaluated, coordinated, and safely rolled out across a diverse ecosystem. + +The discussion then turned to proposals targeting Protocol 14, especially claimable balances and sponsored reserves. Participants dug into how these changes affect payment flows, reserve economics, and wallet and service design. As the conversation evolved, it widened into a candid examination of process gaps—how working groups should be structured, how downstream impacts on Horizon and SDKs can be surfaced earlier, and how upgrade communications can better reach exchanges and operators who do not closely follow core developer channels. + +### Key Topics + +- **Protocol 13 readiness and coordination** + - Validator vote delayed to late June to avoid breaking exchanges. + - Need for clearer runbooks, readiness checks, and direct outreach to large operators. +- **CAP-23: Claimable Balances (Two-part Payments)** + - Separating sending from receiving so payments can be delivered to unprepared accounts. + - Decision that unclaimable funds route to the fee pool rather than back to the sender. + - General agreement that the proposal is ready for Final Comment Period. +- **CAP-33: Sponsored Reserves** + - Allowing one party (issuers, wallets, services) to cover reserves for user accounts. + - Handshake workflows between sponsor and sponsored account, especially during `create_account`. + - Sequence-number lockups as a scaling concern and recognition that this exposes a broader, long-standing protocol limitation. +- **Delegated authorization and future primitives** + - Discussion of longer-term ideas (operation-level authorization, pre-signed operations) to reduce reliance on sequence numbers. + - Consensus that CAP-33 should proceed while the broader problem is addressed separately. +- **Issue 622: Close time application** + - Tradeoffs in how transaction time bounds are evaluated during consensus. + - Concerns about delayed ledgers, expired transactions, and brittle smart contract assumptions. + - Agreement to clarify requirements before committing to a concrete solution. +- **Issue 624: Improving the CAP process** + - Formalizing working groups and clarifying ownership and responsibilities. + - Capturing downstream impacts on Horizon, SDKs, exchanges, and hardware wallets earlier. + - Improving upgrade communication so ecosystem participants know when and how to prepare. 
+ +### Resources + +- [CAP-23: Two-part Payments (Claimable Balances)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md) +- [CAP-33: New Sponsored Reserves](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) +- [Issue 622: Request to change close time application](https://github.com/stellar/stellar-protocol/issues/622) +- [Issue 624: Update CAP process](https://github.com/stellar/stellar-protocol/issues/624) + +
+ Video Transcript
+
+[04:00] All right, welcome everyone. We're live streaming this, just so that everyone knows. This is the first ever broadcast of the Open Protocol Discussion, which is a biweekly meeting in which we discuss and plan for upcoming changes in, and improvements to, the Stellar protocol generally. These meetings are to review Core Advancement Proposals, also known as CAPs, which suggest new features and improvements to the protocol. Sometimes we also talk about the process for protocol improvement, and we're going to do a little of both in this meeting. First we'll talk about two CAPs and one issue that outline proposed changes- those are all for possible inclusion in Protocol 14- and then we'll cover an issue that relates to the process for creating CAPs.
+
+[05:00] Finally, I'll just touch a little bit on the process for orchestrating network upgrades, because we've learned a lot upgrading to Protocol 13, which has yet to happen. So, we published some pre-reading requirements for this meeting- they're in the event description- so if you're following along, you may want to look at those so that you can keep up, and there's also an outline of the agenda on the left of the screen, I believe. And without further ado, I guess we'll just jump in. I wanted to start this out with a Protocol 13 update; that's the first agenda item. Basically, after surveying the ecosystem, we decided to push the upgrade vote two weeks- it was supposed to be today; now it's gonna be June 18th- to give people more time to prepare. Basically, most of the Stellar-based businesses that we talked to are ready right now, but several crypto exchanges aren't, and we just wanted to give them a little bit more time so they don't break. So that's the Protocol 13 update news. Any questions? Okay, cool. Things that we have to talk about: CAPs that are potentially up for
+
+[06:00] inclusion in Protocol 14. The first is [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md), two-part payments. It's been around for a while; we've had a lot of discussions about it. The current version is pared down from where it started. Basically, it creates a new ledger entry called a claimable balance and two new operations that allow you to create and claim a claimable balance. The idea is to be able to separate sending and receiving of a payment so that you can send a payment to an account that isn't prepared to receive it. [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md)- is there anything else? I mean, we talked about this a lot. Is there more discussion that needs to happen about this? Does anyone have any questions or comments? Jonathan Jove, do you feel like you've gotten the feedback you need? I think we're ready to go. I think the only outstanding thing here is doing what we decided the last time we met, which was, I think, two weeks ago, and this isn't in the proposal yet. But instead of
+
+[07:00] trying to do this thing where it's like, oh, you try to send the funds back to the creator, but if you fail, you send them to the recipient- we're not gonna do that anymore, as we decided. It's not reflected in the proposal yet, and instead unreturnable funds go to the fee pool. We're already working on that in the implementation, but the proposal hasn't been updated yet. I just wanted to mention that.
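+
+As a rough sketch of the two-part payment flow being described, assuming the operation shapes the JavaScript SDK later exposed for claimable balances (the keys, amount, and balance ID here are placeholders):
+
+```js
+const { Asset, Claimant, Keypair, Operation } = require("stellar-sdk");
+
+// Placeholder recipient; in practice this is the payee's real public key.
+const recipientId = Keypair.random().publicKey();
+
+// Sender side: lock up 100 XLM that recipientId can claim later, even if
+// the recipient's account isn't prepared to receive a regular payment yet.
+const createOp = Operation.createClaimableBalance({
+  asset: Asset.native(),
+  amount: "100",
+  claimants: [new Claimant(recipientId, Claimant.predicateUnconditional())],
+});
+
+// Recipient side, in a separate transaction at any later time: claim it.
+const claimOp = Operation.claimClaimableBalance({
+  balanceId: "0".repeat(72), // placeholder: the ID of the created balance entry
+});
+```
+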
+But everything else is as we agreed. I have a question. It wasn't clear to me reading the proposal: if the account that's the creator- or, with CAP-33, becomes the sponsor- do they always have the possibility of claiming back the claimable balance, or does the creating account need to add themselves as a claimant to the claimable balance? So the creating
+
+[08:00] account doesn't have any special privileges relative to any other account. It was only there for the bookkeeping about returning the reserve, but it's not special in any other way. You know, if we took the approach of, like, always destroy the reserve, then we wouldn't even have that information there. One thing I will mention is that the created-by field isn't gonna exist anymore- the [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) functionality is going to supplant it, so it won't even be there anymore. It won't look special anymore relative to any of the other sponsorship things. But no, there's nothing special about it. Does that answer your question? Yep, great. Okay, so if there are no other questions, the plan here is that the people who are eligible to vote on moving this into Final Comment Period will email me by the end
+
+[09:00] of the day tomorrow, and if they agree to move this into Final Comment Period, we'll post on the mailing list, get final comments from anyone out there in the community, and go from there. Sound good? Cool. Next on the agenda: CAP-33, also for potential inclusion in Protocol 14: sponsored reserves. This proposal allows an entity to cover the reserve for accounts controlled by another party without giving that other party control of the reserve. It basically extends account and ledger entries so they record pertinent information about sponsorships, and creates new operations to initiate and terminate sponsorship and update sponsorship information for existing ledger entries. The goal is to allow people like asset issuers and wallets to cover the reserves for users. This one landed more recently, and so I guess my first question is: does anyone have any comments, questions, or suggestions about the recent version of this that Jonathan turned in? I had a question. In order to use this
+
+[10:00] stuff, the sponsorship is opened by one account, and then there's a closing out, and that happens by the other account, right, to make sure this thing is sandwiched properly. The question is: how does it work as a workflow- for example, a workflow that creates a sponsored account? I was struggling to kind of see in my head what the path was, because you have to get the two accounts to coordinate with each other. Do we have an example of that somewhere? Could somebody talk me through it? Leigh, you might have more context on this than me, so if you feel comfortable taking this one, I would appreciate that, but otherwise I will do it. Yeah, I can talk about it. I actually also have a question about this, and a concern
+
+[11:00] about it, specifically in the situation that Eric brought up of creating accounts. But yeah, so I think, if it was to work how I read it in the proposal, the client and the server would have to be negotiating.
So the client's gonna make some initial hello that starts off the process. The server's going to create the transaction and assign an account that can use that sequence number for the duration of this whole handshake. The transaction's going to have to be signed by them and passed back to the client to sign and then submit. So there is this back-and-forth negotiation that has to happen. The server- or the service that's creating an account and sponsoring it- can't do it on its own. Does that answer your question, Eric? Yeah, I guess so, although I start having more questions then, like, what happens if the
+
+[12:00] account doesn't exist? That's fine. So, do you think it would make sense to explain this in a non-normative section of the document? I think we actually need a new SEP for this, and it's very analogous to what SEP-8 is to [CAP-18](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md): basically, how do you actually do the negotiation off-chain before you submit to the network. So I think that, Leigh, because you guys are gonna be probably one of the first implementers of this, it would be great to have you figure out what makes sense to you and then inform the rest of the ecosystem, or ask for comments. Yes, I think so. I actually have a concern with this. My understanding of why we have that last operation, to confirm and close it out, is:
+
+[13:00] we see value in the account that's being sponsored confirming that they actually are okay with being sponsored, and I think there's a problem with that. I think that works for a lot of cases, where the account being sponsored can be the account where the sequence number is coming from. But for the create account operation, I think this is going to be really difficult at scale, because the service- the sponsor- is going to have to lock up an account with a sequence number during that process of negotiation, and I think that's going to be problematic enough that it could prevent this from scaling. And we really want this- sponsorship- to work at a significant scale, because it's primarily for an enterprise that's going to be sponsoring lots of accounts. So I'm wondering if we're putting too much value on the problem this solves, and if
+
+[14:00] this will create a larger problem. Yeah, that's a good question. I mean, there's definitely- this is not the only sequence number provisioning problem in the world of Stellar, for sure. And I think, given the fee bump improvements that are coming in Protocol 13, this is kind of a step backwards in that regard, for this particular issue. I am a pretty strong believer that there are a lot of advantages to making this, like, double signature requirement. I mean, I think we're gonna be here for months otherwise, just in the sense of me going through and making sure that everything is still going to work and doing sanity checks on everything, because it weakens the backwards compatibility constraints a lot. That being said, David was at our last meeting talking about this. I think you
+
+[15:00] actually sent an email about this, which I'm going to confess to not having read as well as I feel I should have read it. It's not on the dev mailing list; I can post a link to it somewhere else.
If you want, yeah, review it after this now, because it's seeming more relevant. And I wanted to start by saying, whatever I'm about to say is definitely not gonna happen in Protocol 14, no matter what, because there's just no time. But, as a longer-term solution, providing some other kind of mechanism to avoid this need to manage sequence numbers for all sorts of random stuff- that could be the long-term solution to this kind of problem, you know, using something like a hash preimage or something else to do it. I think that was the concept, right, David? Like, some other kind of authorization thing. Actually, it was basically saying that you can sign an operation, and then you don't have to sign the transaction. Someone can
+
+[16:00] include that operation in a transaction and it will not increase the signature requirements on the transaction envelope. Oh, so that would allow you to sign the transaction envelope at a later time- basically, at a time when you can choose the sequence number. Right. Like, basically, let's say I want to allow you to add a particular trust line to my account, or I want to allow you to authorize a particular trust line on an account. I can just sign that one operation and give it to you, and you can include that operation in whatever transaction you want. Yeah, that's an interesting idea that would mitigate this problem. I mean, I'm kind of backing out on this, basically. Look, I don't have a good way to avoid this problem today, given this design, and my stance is kind of that the merits of this design warrant searching for another way to avoid this problem, which is a
+
+[17:00] general-purpose Stellar problem anyway. You know, trying to solve it for this specific issue isn't gonna solve it for some future issue where we're going to encounter the exact same thing and need to engineer some solution for that as well. And, to be clear, the problem arises specifically with the create account operation, correct? Yeah, and it specifically arises there just because the sequence number is being locked up by another party. So, you know, most of the time when you're creating accounts right now, it's not such an issue, because the service locks up the sequence number for the time it needs it to be locked up for- but this is gonna be locked up for the time it requires two parties to negotiate. Yeah, I mean, one approach you could take is basically the stance that you set some time limit on how long this negotiation can take,
+
+[18:00] this off-chain communication. You say, basically, hey, you can take up to one minute to sort this out with my server, and otherwise you have to reinitiate this with me at some later time, and in that time again you'll have one minute. And then this could allow you to use a small pool of accounts- you only need to maintain one minute's worth of accounts, basically, to use as sequence numbers. Obviously it's not an amazing solution; it's not elegant. It comes back to the channel problem again. Sounds like a minute's worth of accounts for a high-throughput service is a lot of accounts. Agreed. Well, I'm not so sure.
I mean, signups are typically a very small fraction of what happens on a service. Although, I think, you do create accounts during escrow operations as well, so that might make it trickier, but I don't know. My take is, you're probably gonna have to build a channel system anyway, with things the way they are, and so, sure, this
+
+[19:00] maybe means that you need a hundred channel accounts rather than ten, but it's not really fundamentally changing the situation. Maybe that's really optimistic, I don't know. Yeah, I think where I can see the challenge is just in- typically, at least how we have implemented this before, is you take a channel account and you lock it for some predefined operation that you're gonna run in code. So it just becomes more complex to say, okay, I'm gonna lock that based off someone else's actions and/or some timeout. Yeah, I agree, it's just building on existing complexity here; it's not really new complexity. Also, I think the timeouts are in the same order of magnitude, right? Like, we're talking about a transaction typically valid for, let's say, a minute. I don't imagine you would go longer than that for those
+
+[20:00] interactive things anyways. I mean, it's really just- you're physically passing this to the client, they sign it, and it's probably the same thing. That's what I imagine. I think this is a good opportunity for us at the SDF to put out some open source example of how to actually achieve something like this in a fairly high-throughput manner. And, like John said, I think that this specific problem doesn't actually relate to [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md); it's more of a general Stellar problem. So maybe trying to solve for this right now is counterproductive. I mean, on the other hand, it would be nice to at least commit to some way of solving it. You know,
+
+[21:00] if we say, you know, this is gonna be a problem, and what we do is approve [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) and then follow it with six months of discussing which of, like, four approaches we should take to solve this problem- that could be an issue. I don't think so. I think, you know, we have to solve it in general; it's not just for that particular operation, right? Like, even with [CAP-18](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md) you need some way of doing that efficiently. Yeah, then maybe there should just be a short, non-normative section of the CAP that discusses the need for some kind of mechanism, and then subsequent CAPs can cite this- you know, it will sort of provide ammunition to get whatever solution we come up with over the line later on. Yeah, I think that's great. I'll add that after this meeting or sometime later this week- just a little discussion about how this is an annoying
+
+[22:00] source of complexity, it's probably worth it nonetheless, and it's just a manifestation of a bigger problem that we're fighting all the time. And then the plan would be to create some sort of group to solve this larger problem, like a working group.
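+
+For reference, the "sandwich" and handshake being debated here look roughly like the following, assuming the operation shapes the JavaScript SDK later exposed for sponsored reserves (the keys and sequence number are placeholders):
+
+```js
+const {
+  Account,
+  BASE_FEE,
+  Keypair,
+  Networks,
+  Operation,
+  TransactionBuilder,
+} = require("stellar-sdk");
+
+const sponsorKeys = Keypair.random(); // placeholder: the sponsoring service
+const newAccountKeys = Keypair.random(); // placeholder: the sponsored user
+// Placeholder sequence number; in practice this comes from a channel account
+// that stays locked up for the duration of the handshake.
+const sponsorAccount = new Account(sponsorKeys.publicKey(), "0");
+
+const tx = new TransactionBuilder(sponsorAccount, {
+  fee: BASE_FEE,
+  networkPassphrase: Networks.TESTNET,
+})
+  // 1. The sponsor opens the sandwich.
+  .addOperation(
+    Operation.beginSponsoringFutureReserves({
+      sponsoredId: newAccountKeys.publicKey(),
+    }),
+  )
+  // 2. The sponsored account is created without funding its own reserve.
+  .addOperation(
+    Operation.createAccount({
+      destination: newAccountKeys.publicKey(),
+      startingBalance: "0",
+    }),
+  )
+  // 3. The sponsored account closes the sandwich, confirming it agrees.
+  .addOperation(
+    Operation.endSponsoringFutureReserves({
+      source: newAccountKeys.publicKey(),
+    }),
+  )
+  // Bound the off-chain negotiation, e.g. the one-minute window discussed above.
+  .setTimeout(60)
+  .build();
+
+// Both parties sign: this is exactly the back-and-forth handshake above.
+tx.sign(sponsorKeys);
+tx.sign(newAccountKeys);
+```
+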
+But from what I understand, this- [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md)- could move on to Final Comment Period, because it seems like there's not a way to do it that doesn't encounter this larger issue. Is that correct? Yeah, I mean, my stance would be: if you avoid this issue, you'll find different issues, like the other issues that were problems with [CAP-31](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0031.md), which we rejected in favor of this. So you can't avoid both sets of problems; you have to pick which one you want. I picked this set of problems from the perspective that they're problems that are general-purpose manifestations, not single-purpose, and I think the other benefits are better on top of that. But, John, do you think you can add this language before Final Comment Period?
+
+[23:00] Yeah, I mean, we make modifications to these during implementation also, and then we go into the implementation period. So, yeah, I'll add the language and it'll be there during the Final Comment Period for sure. So you'll add it and then we'll vote. Okay. When John's added it, I'll submit it to eligible parties to vote on and then we can see. Basically, if people are like, no, we don't think it's ready, we can bring it back in the next meeting. But if people look at it and decide that it is, we can go ahead and move it into Final Comment Period and open it up for final comments. Cool. I had one other question, about other operations. You said other operations will need updated semantics so that they behave correctly with this proposal- can you expand on what that might include? I'm having a little trouble hearing you, Eric. Can you say that one more time? Yeah, the other operations need updated semantics in order to behave correctly.
+
+[24:00] Can you just give, like, a high-level view of what that might include? Is that something we need to worry about? It's not something that you need to worry about; I'm already worrying about it. I'm working on it- I was working on it right before this meeting, actually. Basically, what kind of needs to be done is, everywhere where we change something like the account's number-of-subentries field, we now need additional logic to say, hey, was there a sponsor? If there wasn't, then do what we used to do. If there was, then we have to do all this other stuff, like check whether the sponsoring account still exists- it doesn't have to still exist, for example. So that's a new failure mode. I haven't worked out the precise details on this, because I need to kind of go to the implementation and see what's gonna work, and that's why it's kind of very nebulous in the proposal. But the consequences are gonna be, like, potentially a few new failure modes that are only possible with sponsorships, and basically just
+
+[25:00] new code for me to write. But from everyone else's perspective, everything will kind of just work the same: when a create account works, it'll just work, and you won't really be able to tell the difference, other than some sponsorship information will have changed. Okay, so it sounds like most of this is internal to the implementation, but we might see some new error codes. Yeah. And changes like that are where we have, like, the ledger entry extension v1 and the account entry extension v2.
Those changes are where those things will be maintained. So, you know, that's pretty much what it comes down to. Okay, is there anything else to talk about in relation to [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md)? I feel like the plan right now is John gets back to work, adds this extra section, we send it out and see if it gets voted into Final Comment Period, but I
+
+[26:00] don't want to close it if there are still questions. Just want to say that it's a great CAP, John, much better than [CAP-31](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0031.md). I really like the whole moving things to the off-chain logic- really nice CAP. I appreciate that. Great. Well then, we will move on to the next item on the agenda. That's issue 622, which is about changing the application of close time. Basically, it changes the close time a transaction is validated against, so it's the one set by the ledger before the transaction gets applied, because right now, when applying a transaction, we use the close time decided during consensus, and that can create issues where the transaction gets accepted into a ledger but fails later. I think this is just an optimization. Does anyone have any issues with it? Does it feel like something that should be worked on for Protocol 14? Yeah, it's an annoyance,
+
+[27:00] basically, for smart contracts, and we are kind of ironing out those things over time. This is a small item; so that's why. Well, yeah, I'm not sure I understand the issue. Right now, who decides the time that time bounds are evaluated against? It is not about deciding the time. This is about how we reason about the time from a transaction validity point of view, because of time bounds, right? Okay. So, time bounds: we compare them to some time, right? And right now we use the time that was decided during the consensus round, and this is wrong. And so the time you're proposing to use is the time from the previous
+
+[28:00] ledger, because the time from the previous ledger was already decided when you were checking for validity of the transactions when they were submitted. I mean, this seems somewhat dangerous. This means, like, if there's an outage, it's possible that we can apply a transaction that's, like, an hour old or something. Yeah, yes, and the close time may also jump by an hour, right? So we have to decide- I mean, you have to make a trade-off: you know, what happens when you have an outage, which transactions do you want to reject, I guess. So, let's say that I have a transaction that I can submit that will trade X for Y or something, right? And since I don't know the value of X and Y in an hour,
+
+[29:00] like, we agreed on this now, so now I could theoretically, you know, DoS the network and get, like, a free option to basically make my decision with an extra hour's worth of information. I mean, obviously the alternative- and that's a lot harder- is to remove those transactions from the transaction set, and the thing is that we cannot do that right now; we have no way of doing this, right? Consensus is still running; you can't change the transaction set. I mean, I do see other ways of doing this, and I haven't thought deeply about this, but just, you know-
So another thing you could do is, for example, when you nominate a transaction set, it could have some expiration time or something- but that's not there yet, okay. You
+
+[30:00] know, or you could kind of- I'm not saying that you would- you could look at validity based on some heuristic on the timestamp. Another thing: currently we sort of take the largest transaction set when we're combining multiple nominated values, and maybe we could take the transaction set with, like, the most unexpired transactions or something. But you still end up with some expired transactions, right? That's the issue here: what do you do with them? I guess the reason we have this problem is that the close time and the transaction set are kind of decided separately. Could we modify our
+
+[31:00] validity checks to- I'm specifically thinking about David's one-hour-window thing that he was talking about. So this is still under the assumption that we're gonna make the change described in 622. But could we modify the validity check such that you won't vote for anything, if you're not Byzantine, that is invalid? Basically, transactions' time bounds are checked both using the last closed time and the current time, because then you wouldn't vote for something from an hour ago, because it wouldn't have been valid using your current timestamp. It has to meet both conditions to work. That's only a protocol-level change. If you do that, you only reduce the chance of having a bad timestamp; you don't eliminate it. And so the main- I mean, is
+
+[32:00] the main issue here just that we have garbage in the set of transactions that we can't charge, right? Right now, no, it's not garbage. It's more like we end up with transactions that are accepted during consensus, but, because they are evaluated against a different timestamp than the one from when they were nominated, they are now becoming invalid. So now you have those smart contracts potentially getting DoSed, right, because they are valid when consensus starts and invalid during consensus, and, yeah, they are failed transactions and they will consume that sequence number. And maybe that smart contract is built in a way where they shouldn't consume a sequence number. That's what the core of the problem is, because, you know, we have
+
+[33:00] the invariant that anything that gets into the ledger, we're going to guarantee we're gonna take the fee and we're gonna take the sequence number. So that's the thing that we're basically wanting to combat here: getting your smart contract broken by one of these boundary-case timestamps. Well, another way to solve this would be, say, authenticated operations. So I feel like there are other things that- why do you think that? This has nothing to do with authenticated operations; this is timestamps. Well, the authenticated operation gets rid of the problem of consuming sequence numbers. Part of the problem here is how brittle a lot of these protocols are because of, like, sequence numbers, right? So the issue is that
+
+[34:00] sometimes they don't work, right. So, like you're saying, you could fix certain smart contracts by using this new-
you know, basically use a different sequence number, basically from a different account; you offload the sequence number to some, you know, live account, basically. But yeah, that's not what we are talking about- you know, sequence numbers in general. Like, today we do see, once in a while, ledgers with failed transactions in the transaction set because of time bounds. I understand, but I'm saying that one of the arguments in favor of this is that right now there are annoying situations that break certain protocols because a sequence
+
+[35:00] number gets consumed by a transaction with invalid time bounds. And I'm saying that, well, because of [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md), you've already said there's a need for something, and one of the ways to do that something would be the authenticated transaction with the authenticated operations. I'm not saying that's the right solution, but it just so happens that authenticated operations also solve this problem. All right, but it cannot solve all of it, right? Like, you're saying that no smart contract would ever rely on a time bound or something like it. Yes- ideally, smart contracts would not rely on sequence numbers in a way that breaks if the sequence number gets consumed by an invalid transaction. That doesn't sound that plausible to me, actually,
+
+[36:00] because, on reflection, if you do that- not only that, but you can't do the bump sequence trick without that. And you can, because- I mean, I'm not saying this is the right answer, but it's an existence proof that you can do this- because the authenticated operations can depend on there being some other operation in the transaction, and that other operation can be a bump sequence, right? So we've got existing smart contracts- what about ones that already have this problem? Then they'll have a different problem if we do 622. Oh, the only reason you would have a problem is the DoS situation, if you have, like, those funny time windows. Normally it's more like: starting from a certain date, this is valid, with an
+
+[37:00] expiration. You know, if you have an expiration and you don't write around the expiration time, I can see that this is a problem regardless. Right, this is not a solved thing. You basically rely on the fact that the network has a semi-accurate time, right, but in the event of an outage it's not accurate; it's going to be off by some amount. Look, I think that there's a problem here. I think that if you were to write up, like, a requirements list, my suspicion is that I could propose some alternative that doesn't look like the proposal here. Whether that's better or not, I don't know, but I think it would probably be worth considering multiple
+
+[38:00] approaches based on a slightly more detailed description of the problem and what we're trying to achieve. Does anyone object to taking that course, basically writing up a requirements list and giving David an opportunity to come up with an alternative solution? That's fine. It doesn't have to be- you know, I'm not trying to create a lot of make-work here. It could just be a couple of simple examples of the scenarios where we're trying to avoid a problem.
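+
+For what it's worth, the stricter check floated a few minutes earlier- only voting for transactions whose time bounds hold against both the last closed ledger's close time and the validator's own clock- might look roughly like this. This is an illustrative sketch with hypothetical names, not actual stellar-core logic:
+
+```js
+// A value of 0 in a bound conventionally means "unset" for time bounds.
+function withinBounds(tx, time) {
+  const { minTime, maxTime } = tx.timeBounds;
+  return (minTime === 0 || time >= minTime) && (maxTime === 0 || time <= maxTime);
+}
+
+// A non-Byzantine validator would only vote for a transaction that is valid
+// against BOTH timestamps, so nothing from an hour ago can sneak through.
+function wouldVoteFor(tx, lastCloseTime, localTime) {
+  return withinBounds(tx, lastCloseTime) && withinBounds(tx, localTime);
+}
+```
+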
+But I think, honestly, it'd be good to think about it a little more, just from the perspective that I want to try to understand that DoS scenario well- like, is it a problem, or does it just sound like a problem? I'm not sure that it's actually a problem, but I'm also not sure that it's actually not a problem. It's a big existential question. So I think
+
+[39:00] it's worth looking at least a little deeper at this and making sure we know what we're doing. I think we also talked about that type of scenario quite a bit already on the mailing list, because David had this other change to the time bounds. Do you remember, David? At this point- I remember we went back and forth on time bounds. That's basically what ended up being replaced by [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md), I think. But in that proposal he had some interesting new time bounds, right? It had, like, preconditions, I think. But there were some discussions around those types of things: what happens when an attacker delays the network, what other types of properties you are trying to get here. And maybe the issue is that right now we don't have, basically,
+
+[40:00] a written statement on what we expect smart contract developers to do when you have these types of attacks. I think that's maybe just one of the other things that we need to think about here. Yeah. Basically, what I'm saying is, before someone invests the time to write a whole CAP for this, let's just get some requirements down and, at a subsequent meeting, talk through whatever approaches people have and come up with the approach we think is best, because there may be other approaches. Maybe not, maybe I'm wrong, but that seems like a great solution to me. Cool.
+
+[41:00] Any other discussion about this? Are we ready to move on to the next item? Great. So the next item is issue 624, which is: improve the CAP process. It clarifies the role of working groups. Also, it intends to take into account more information about the impact of proposed changes on downstream systems. Thoughts, questions, discussion that we want to have about this? I mean, I guess for me it makes sense to better figure out how working groups create CAPs and who owns them and who participates in those groups. This proposal made a lot of sense to me, and I think it's just a question of starting to experiment with it and seeing if it works, right? Maybe as background: when I wrote this stuff, one of the things I also
+
+[42:00] took into account is that in the current process that we have actually written in the protocol repo, it says that there is this mythical creature that participates in the CAP process- that's the "body" or whatever. Yeah, that's right; that doesn't exist. And also, with the way we've been working, more and more it seems to me that the body concept was more or less over-engineering, in a way, from the process point of view, in trying to take away room for the working group to actually figure out who are the best people to actually have in that working group. So I think, if we end up with
+
+[43:00] something more like what I'm highlighting in this issue, we would remove the body from the process.
Yeah, I mean, I just generally object to the word "body"; I just don't like the sound of it. Okay. But also, I think if it's something that we're not using- and it was the same thing that John brought up with the test section, right- I guess we need to just look at the actual readme in the CAPs repo that defines the CAP process and make sure that it actually adheres to a process that we feel good about and that reflects what we've learned. But then I think we also need to be relying more strongly on working groups, right? Yeah. And maybe- I saw that Eric had a question, like: what does it mean to have the rest of the stack light up and all that? To me, the intention is to have the working group
+
+[44:00] think about the implications of that change in a more holistic manner. Basically, very often some of the changes also need a SEP; the SEP should basically be written in parallel with the CAP by the working group, if that's the only way that thing can be used, because that's how we are going to find bugs in the CAP. Another thing, on performance- because I think Eric, or maybe somebody else, asked about performance- I wanted people to think about performance implications in other systems as well. The working group should be thinking about- well, the CAP itself talks about the local performance impact, but at the same time, for example, one of the most painful changes in Protocol 13- and it was very surprising to me- it
+
+[45:00] was the change to the fee column in Horizon, right, the table migration that takes, like, nine hours. Well, whatever- maybe we would have made some changes, I don't know, if we had identified this early on. So this is the kind of thing I'm trying to get to: sometimes changes look trivial in the core layer, but the implications upstream are just terrible, and I kind of want people to think about those types of implications early on, so that we know we can make adjustments. Basically, the working group would figure that stuff out, document it as part of the proposal, and submit it to this group with all that documentation, so that all the research would be done- yeah, a little bit more of what we were talking about. Yeah, and to me that sounds great. I mean, the only question that I have is a feasibility
+
+[46:00] one, right? Like, if we had all the right people to staff any given working group that we wanted to create, this would work. I just wonder if in reality we're gonna run up against issues where we want to have three working groups, but we don't have three working groups' worth of people with the necessary insight to build those working groups. That's my only feasibility concern. Does that make sense? I mean, I run into that problem already, and, yes, we do. And at the same time, my response is that if we don't have time to think about this, maybe we should not do it in that protocol version. If we don't do the due diligence that goes with a specific change, that means we're not thinking about it, right? So I think that the additional lift here, which is reasonable, is to basically improve the process of putting together working groups.
So it's like: improve what a working group does, define it better, add it to the CAP process, and then also get
+
+[47:00] better at putting working groups together, which seems reasonable to me. And my hope is also that, if we have more of those kinds of lively discussions at the working group level, those conversations end up being a little more inclusive from an ecosystem point of view, because they are actually touching a lot more different parts. I think the issue with any thread on the dev mailing list about protocol changes is that they are so far removed from the actual developers that nobody can really chime in, right? They don't see how this is really impacting them unless it's, you know, obvious. So my hope is that, yeah, we can maybe get a little more feedback early on from the people that are going to use those things in the
+
+[48:00] future. If anyone's watching this right now and is like, I would love to be in a working group in the future, you should email me. You know, I think it's gonna be about going out and collecting people, right? People with insight, people who are building stuff, and making sure that they understand the potential implications that one of these core changes has, and getting their insight, right? Yeah, that makes sense to me. I like the working group direction and being more inclusive of, you know, people on the Horizon team or SDK team, or even application developers. I think one other element that seemed pretty central to me, that I didn't- maybe I missed it, but I didn't really fully see it fleshed out in this issue- was the contents of the CAP, and having an explicit section of the CAP that discusses the changes that we'll make to Horizon. So, like, to Nico's point,
+
+[49:00] saying, yeah, we're gonna migrate this column and, within the base table, we're gonna change the schema this way; I'm going to add these endpoints; we're going to modify the Go SDK to have these new calls or deprecate this call. That seems pretty key to have as part of it for understanding the downstream effects, and I didn't really see it articulated here. I mean, the issue, by the way, was really to start the conversation; it's not the actual diff for, oh, you know, what we're going to do on the readme or whatever. At the top level, I agree. I mean, I think, at the same time- should the CAP become more implementation-focused? I don't know. I like that right now it's a
+
+[50:00] little more at the spec level, you know, functional, and not too low into the details, right? And I think that if we have the right participants in that working group, the implementation kind of falls into place more in GitHub issues and so on, not in the actual doc; the actual doc is more for tracking the high level, like: does that even make sense, right, for all the systems involved? And, depending on the change, there may be a SEP that needs to- actually, certain CAPs don't make sense without an accompanying SEP, and so there's no point in talking about what database tables you're gonna migrate in the CAP when there's a SEP associated with it. I also agree that the CAP is the wrong place for this kind of stuff.
The CAP is evolving as it's implemented, and there's a point where it becomes reasonable to think about the
+
+[51:00] details for downstream, but the high-level details can be thought of separately and should be done in a working group. And I think most of the time there should be a SEP associated with the CAP, because something is motivating this work, right, and that should be the SEP. Yeah, that's kind of what I was gonna say, Eric. Maybe we should have a new standard, which is: we don't accept CAPs that don't have SEPs. And these SEPs might not be, like, hey, here's some new off-chain protocol that client and server need to implement, or something like that, new endpoints. It might just be, like, hey, Horizon is gonna be broken by this and we know lots of people depend on Horizon. Well, that can't be a rule, right? I mean, if we, like, fix the bucket list or whatever, you know, you don't need a SEP always. But when you're adding a new feature, it generally is gonna make sense to have a SEP involved. Well, maybe it's a question that you have to answer: does this require a SEP, yes or no, and link it. So maybe one of the header fields should be,
+
+[52:00] like, associated SEPs. Maybe there are multiple ones, right? There's a new feature that can be used in two different ways. So it's a kind of metadata thing that needs to be added. Well, while we're on the metadata- this is kind of minor, but, you know, I mostly think that's fine- I'd like to argue for keeping the term "author" instead of "recommender", and maybe having a "maintainer" instead of an "owner", just because I want to make sure that everybody who contributes to writing a CAP gets appropriate credit, and the term author kind of suggests that you deserve credit for something, whereas recommender doesn't necessarily carry that same connotation. I mean, those are names from- yeah, I guess they are from different worlds. The names I used were from the RACI thing, right, and that's why they stand out. Doesn't RACI use "author" too? No- it can be read, it
+
+[53:00] means several different things, one of which is, like, "accountable", which definitely makes no sense in this context. Yeah, what makes no sense? "Acceptor." Yeah, they all sound like names of superheroes. Yeah, we can try to tweak it, as long as we have clear definitions of what those things mean, right? Yeah. I think, just to chime in again: I think what we're describing maybe is a little bit closer to our current process, and so I'm worried it might fall into some of the same pitfalls. I mean, maybe it's just kind of a terminology thing. Like, in my experience, a SEP doesn't talk about the Horizon endpoint changes- I've never seen that in a SEP- and I don't care what the document is called or exactly where it appears, but I think there needs to be a link in the CAP, at least, if not included directly in it, that explores changes to
+
+[54:00] Horizon, changes to the SDKs, because I think that's been the issue so far: we have a disconnect between the CAP and the implications it has downstream in the nasty cases. So, as long as we cover that in some kind of doc- maybe we change the SEPs to include that, or we can do that in the repository or some new place to store that particular kind of doc, right?
Yeah, like I said, a CAP links to a SEP, and a SEP links to, like, a Horizon working document or something, whatever we call it. Yeah, I support this. I just want to make it clear that we may not find all this stuff at the right time, right? It's good to think about it at some level, but, like, the fee change was very subtle: it's one line in a piece of XDR that you have to notice, and then realize that that's actually implemented as an int32 in Horizon, and then there's all these consequences. We are not gonna get all the things, so I wouldn't want to gate the CAP on that. The only way
+
+[55:00] we get those is if we release some pre-version of core and then have a long time to implement all the Horizon changes and discover them. I think, in the same way that the CAP evolves alongside the core implementation, some of the consequences will become visible as you do the implementation in the SDKs or Horizon. Yeah, and my point was: I think this due diligence has to happen before the CAP gets into the final state, even if you miss some stuff. I mean, no, but the point is, you know, what Eric was saying: for example, people have to actually go through every change, think about every change, because that's how we do the CAP, right? We have to actually think in detail about what it means to make that change in core, right? Same thing for the other systems. I feel like part of the surprise with the fee change, with the
+
+[56:00] change in columns in Horizon, was just how long it took, right? Maybe I'm wrong, but I think it was hard to know until you started doing it- like, oh man, that copying and pasting, basically creating these two new columns, is gonna take forever. But in general, right, maybe we would not have identified this fee change until much later. It's more about the pattern, right, that we should evolve: if you think about, you know, like, oh, there's this schema change coming- and maybe we don't know exactly the implication, but we know there's this big schema change- then hopefully, the more eyes we get on that thing, people remember: when you make this kind of change,
+
+[57:00] you're going to take a hit. Yeah, I mean, I definitely support this. I support doing this due diligence upfront and trying to think it through at a point where we can still make changes in the CAP. That seems like a no-brainer to me. I'm just saying, to understand all of the implications is very difficult, and we're not going to sit down, think really hard, and figure that all out, just to be sure. Okay. I mean, do you like the idea of listing out the changes to the Horizon API, for example, before finalizing the CAP? I like it as some kind of summary that says what we think the impact is at a high level. I just don't know how detailed that can get, but, you know, we'll try our best.
The other thing is: it says "representative group of stakeholders," which is, I guess, intentionally vague, and so we have to be careful that we don't fall into the trap of thinking about the consequences for something like Horizon and then forgetting something + +[58:00] else, like maybe the consequences for hardware wallets, for example. Yeah, definitely. Yeah, the reason it's vague is because, basically, when the working group is forming, it should actually decide who the members are, right? I also think those members might be changing over time, and that's something I've been thinking this whole time: at the beginning, you need people who know how core works, plus some people who have a reason to motivate this thing, and then, as the core part of it develops, it's like, oh, okay, we have these ideas for how we could do things; let's make sure none of these are offensive to Horizon or offensive to hardware wallets. And then you start roping those people in, and you've got a different set of voices. So everybody gets to spend a day with John. What a present for everybody. + +[59:00] Okay. I mean, obviously this needs thought and work, but I think that should kick it off, and we can talk about it more. I don't know that we're gonna come to any greater conclusion here. It seems like we could talk about this on the dev mailing list, and eventually I feel like we need to update the readme, and probably the actual template for CAPs, and work on working groups. Maybe a way forward is: everybody can comment on this issue that Nicolas raised, and we can iterate a little bit, because, as he says, we're not saying this is the definitive version; we're saying it's a starting point. So we should have this discussion. Great, perfect. So we'll start the conversation there, which is issue 624 in the stellar-protocol repo. Really cool. I'm just gonna take one minute and say that, obviously, all of this impacts the upgrade process. With Protocol 13, you know, we delayed the vote + +[01:00:00] by two weeks, and I think we've learned a lot about just doing better notification. So the basic idea, which I've posted, is: work out the timeline for Horizon and SDK readiness, and I think these working groups will help do that; then post an upgrade guide that tells everyone what to do and where to find stuff; message all the relevant channels; and then verify that key projects in the ecosystem are ready, basically getting them to write back to us and say "we are ready to upgrade." Then, I think, do the testnet upgrade, then coordinate the public network upgrade, and once the upgrade happens, post an upgrade notice with any errata and information about how to use the new features, so it's all concise. Again, that's just a proposal. We did most of that with Protocol 13, but we didn't reach out specifically to key organizations until later in the process, and it's getting somewhat better. But I'm open to any suggestions that anyone has about how to improve the process. Who's a key + +[01:01:00] organization? Do we have, like, a mailing list of key organizations, or something beyond the lists that anyone can join, to notify?
Places where anyone can join, which are all of our public channels, are notified out the wazoo, right? They get first notification, they get a notification every time there's an update, and a week in advance. So any place where people opt in is covered. But we also need to reach out to people who will not opt in to our own channels. Specifically, what came too late this protocol release was making sure that crypto exchanges, which deal with a lot of different networks and for whom Stellar isn't first and foremost in their minds, were ready; it's hard to get their attention or get them to opt in to a channel. So, yes, anyone can opt in, and we can check the readiness of anyone who's opted in, but we also need to do some bespoke outreach to the people who are difficult to reach. Okay, radical. So that's + +[01:02:00] the end of our first broadcasted Stellar Protocol meeting. How's everybody feel? + +
diff --git a/meetings/2020-06-05.mdx b/meetings/2020-06-05.mdx new file mode 100644 index 0000000000..c41043ec49 --- /dev/null +++ b/meetings/2020-06-05.mdx @@ -0,0 +1,115 @@ +--- +title: "ZkVM: About the Motorcrab" +description: "An overview of Project Slingshot and zkVM, covering confidential transactions, programmable predicates, and a modular zero-knowledge architecture designed to support scalable, multi-asset smart contracts." +authors: [oleg-andreev] +tags: [spotlight] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This talk introduces Project Slingshot, an experimental zero-knowledge blockchain architecture centered around zkVM—a programmable transaction format designed to combine confidentiality, composability, and scalability. Rather than starting from protocol mechanics, the session frames the core problem: how to build a public, auditable ledger that still supports private balances, private transfers, and customizable financial logic without constantly upgrading the network. + +At the heart of the design is zkVM’s execution model. Transactions are treated as small, self-contained programs that run inside an ephemeral virtual machine. As the VM executes, it builds a constraint system and verifies it using zero-knowledge proofs (Bulletproofs), ensuring assets are conserved and rules are followed without revealing sensitive data. All state changes are expressed as inputs and outputs, allowing transactions to be validated independently and in parallel, while higher-level features like accounts, order books, and payment channels live outside consensus-critical code. + +### Key Topics + +- Motivation for Slingshot and zkVM: encrypted assets by default, public verifiability, and extensibility without frequent network-wide upgrades. +- Modular architecture: cryptographic libraries, VM, networking stack, and blockchain state machine designed to be deployed as a sidechain, private ledger, or embedded node. +- zkVM execution model: transactions as “PDF-like” programs, stack-based execution, constraint-system generation, and zero-knowledge verification. +- Contracts and predicates: locking assets behind public keys or programmable conditions, including taproot-style branching. +- Confidential value flow: linear asset types, mixing (cloak), and protection against duplication or unintended asset creation. +- Example applications: confidential payments, programmable order books, collateralized loans, and two-party payment channels. +- Design comparisons: contrasts with Stellar’s built-in operations, Ethereum’s global state machine, and Zcash’s fixed circuits. +- Roadmap and integration questions: running zkVM alongside Stellar as a sidechain, bridging Stellar assets, and packaging networking and tooling for broader experimentation. + +### Resources + +- [Demo site](https://idioms-demo.stellar.org) +- [Project Slingshot GitHub repo](https://github.com/stellar/slingshot) + +
+ Video Transcript + +[01:00] Hi, so hello everyone. Today we're going to talk about Project Slingshot and zkVM. My name is Oleg and I've been working at Stellar on this project for some time already, so I'd like to give you an overview of why we do it, how we're going to use it, and why it's interesting. So let's do the time check, all right. So the problem that we have is: how do we do a blockchain that protects private information while being public? Right, it's a public system that's fully auditable and transparent, but yet we do private transactions in it. So effectively, we want something like a ledger with TLS. There are two major components to it. One is the confidentiality part, so for + +[02:00] this we need some sort of encryption for our balances, accounts, and the amounts that we transfer. And since it's a public system, we need a public verification mechanism for proving that the encrypted, hidden transfers are actually correct and respect the balancing equation, meaning that people, when they transact, don't create money out of nowhere. So we need some sort of zero-knowledge proof in our technology stack. And the third important component is some customization mechanism. Why is that? Blockchains are essentially consensus systems. They work best when they're mostly decentralized and deployed widely, and as many applications and users as possible run them, execute the rules, + +[03:00] and check the transactions. And this means that it's very hard to upgrade the system with new features or improvements. So we need, on one hand, to make it as simple and focused as possible so it's not buggy, but at the same time we want to build some interesting features around the system, from smart contracts to, you know, decentralized applications, to improve the security of people transacting in this new environment. One way is to just roll out network-wide upgrades, which is as hard as the network is decentralized; the other is to foresee some customization points so people can build things on top of it without changing the underlying rules. So this is where the VM part comes in. We + +[04:00] need some sort of programmability for our system, and if it's programmable, it means that there are programs, and it means that there is some sort of virtual machine that is going to execute those programs. So this is where the VM part comes in. Now, what is Slingshot and how does it relate to the zkVM? Slingshot is an umbrella term for the entire suite of libraries and protocol implementations, which include not only the more interesting part, the transaction format with the zero-knowledge proofs and the programmable conditions, which is the zkVM, but also all the auxiliary cryptographic libraries, such as signature schemes, multi-signature things, the Merkle tree implementations, all the way up to the entire blockchain system, where the blockchain state machine is a + +[05:00] separate protocol implementation, plus the peer-to-peer networking stack and higher-order application protocols, such as the mechanism behind accounts, the protocols for synchronizing things like payment channels or order books, and all these kinds of things that can be built on top of the core blockchain system. So this whole thing is called Slingshot, and it all exists in a single GitHub repo where the development happens. Now, what's interesting about the zkVM and Slingshot is that it has a fully modular design. Why is it important?
This is not a single application that has a specific way of using it. It's more like a collection of libraries that we can flexibly figure out how to deploy and + +[06:00] roll out in the best manner. So we can build a sidechain with it, or we can use it as a private ledger, or we can use it as both. The cryptographic pieces are composable, so we can attach off-chain proofs to our transactions. For instance, we can offload some computational cost from the network by building inter-party proofs about payments, for instance some notarization or other zero-knowledge schemes, completely outside of the blockchain, so we don't have to load the entire network with this computationally heavy work. We can use the full node as a library embedded in a greater application that, for instance, does some interesting indexing of the transactions, and we can decide how big the node should be and how much data it + +[07:00] should store. For instance, the Utreexo state compression scheme allows us to have very minimal state at the cost of a little bit more bandwidth overhead, and you can decide whether you just have this very compact full node embedded in your wallet, or an even lighter wallet, or you build a completely standalone application running on a cluster, for instance, with as much data storage as you possibly can, because you want to provide as much service as possible to the very light applications. This is why the whole project is designed to be modular from the beginning. Now, specifically, the zkVM is a transaction format that sits on top of the networking part and the consensus mechanism, and on top of the zkVM you can build your custom application protocols. So what does it mean to say + +[08:00] that the zkVM is a transaction format? In other words, the zkVM is a fancy parser. If you think about Bitcoin or Stellar or Zcash, the transaction format in those protocols is more like XML: some kind of hierarchical structure with lists of things that contain other things, that declaratively describes the flow of assets. In contrast, Ethereum is much more programmable, but its transaction format is more like a remote procedure call: the transaction just sends a message to the network, you have a global state with some programs loaded into it that can be invoked and do some operations, some mutation of the state, and a transaction is simply a signed message into this global computer. The zkVM is neither of those two things. It's more like a PDF: it's sort of a declarative format, but at the same + +[09:00] time it's an executable little language that you run through the VM, which is sort of like a parser that executes the commands and does some local operations to determine what changes to the state should be made. To explain this better, here is a quick overview of the data model in the zkVM. You have transactions, and transactions are objects, these little files, that transfer assets from inputs to outputs, and they can also issue arbitrary assets, because it's a multi-asset environment, right, just like Stellar. So this is how the asset flows, from inputs to outputs, and the outputs are actually the objects that we call contracts.
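A schematic of that data model, with illustrative field names rather than the real XDR (the predicate and payload parts are explained just below):

```js
// Illustrative only: a transaction consumes inputs (existing contracts) and
// creates outputs, and every output is a contract locking a payload under a
// predicate. Field names here are hypothetical stand-ins for the real format.
const exampleTransaction = {
  inputs: ["contract-id-1"], // existing contracts being spent
  outputs: [
    {
      predicate: "alice-public-key", // simplest predicate: a public key
      payload: [
        { type: "value", qty: 10, flavor: "token-A" }, // a linear asset value
        { type: "data", bytes: "arbitrary parameters" }, // plain data parameter
      ],
    },
  ],
};
```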
These contracts have a predicate that locks some content inside the contract and provides + +[10:00] conditions to unlock that content. The content is called the payload, and it's actually a list of things, and those things can be either values, which are linear types, things that you can move around but cannot duplicate, which are the assets themselves, or some data parameters. So your contract is this collection of values and data that can be locked into this container by a predicate, and the simplest way to think about the predicate is as a public key, right? So you say whoever owns the corresponding private key can unlock the values and then decide what to do with them. And you unlock by showing a signature on a transaction: you have a transaction that says "I want to spend this value in such a way," and you have to provide a signature that authorizes you to unlock that contract. Or the predicate could be a little sub-program, so you have to run the program, + +[11:00] meaning that the VM will run the program and check additional conditions described by the program before deciding to unlock the values. And you can combine both of them with a scheme called taproot. It's like a Merkle tree of programs and public keys, so you have the flexibility to decide things like: multiple parties can sign the contract together, or they can individually choose whatever branch they want to execute. So this gives you a lot of flexibility in terms of pre-programming different conditions that unlock the contract. And it's even a little bit more flexible, because you can not only pre-program some conditions or provide a signature, which is one option, but you can also provide a signature over new conditions, so you can devise some + +[12:00] program later, then sign it, and then that program will be executed. So this gives you infinite ability to do some interesting multi-stage schemes while keeping a pretty simple design. So how does this all work inside the VM? Again, a transaction is just a program, like I said, like a PDF file. In our case it has just a little bit of metadata; it's a program that also carries a zero-knowledge proof, and we will show in a second how this proof applies. So what happens when you want to verify a transaction? You instantiate a VM, and by "you" I mean the node that verifies the transaction. It instantiates the VM, which is completely ephemeral. That means it's not some sort of globally running VM, like a Docker-container kind of thing. It's like an instance of a + +[13:00] parser for this specific transaction, so it's very lightweight: you create it just for a single transaction. You load the transaction into this VM and it works as a simple stack machine. It has a simple stack; it's not Turing-complete, so it has predictable performance. And the goal of the VM is not only to provide you with the ability to customize conditions; it also enforces the network rules. The operations it executes also check that you don't duplicate money, that you respect the rules of the network and don't break them. So what does it do? It runs the program, and as the program is running, it compiles on the fly a thing that we call a constraint system, which will be used for the zero-knowledge proof.
So the constraint system is effectively just a list of linear and multiplicative equations that check that, for instance, some amount should equal some other + +[14:00] amount, or should be subject to a certain formula. But the cool part is that this constraint system works in zero knowledge, meaning that the numbers in the formulas that you are manipulating can all be encrypted, and so your program can manipulate these encrypted values and require that they conform to a certain formula. As a result of executing the program, you will have this sort of matrix of constraints that builds up the constraint system, and after the program has finished executing, the verifier runs the zero-knowledge protocol: the VM checks that the proof is correct for this constraint system, which proves that whoever knew the secret keys that encrypted those amounts combined them correctly. So if there was a smart contract saying that a couple of values must correspond to a certain + +[15:00] formula, then these values, being secret, are still correctly constrained by the formula. So this is the job of the zero-knowledge proof, and the VM does that at the end of the program execution. And finally, all the effects in the VM are fully local; they don't affect the global state. So it allows you to evaluate all the transactions independently from each other and verify them in parallel. So how do we even apply the effects? All these inputs and outputs are actually recorded in the transaction log, which is simply a list of effects that should be applied to the blockchain state, and once we've run the program and verified the zero-knowledge constraints, we simply take this log, this list of effects that effectively say you have to erase certain inputs and create certain outputs, and we apply them to the blockchain state. And this is a very + +[16:00] lightweight operation that can only fail if you're trying to double-spend the same output in two different transactions: if it happens that a second transaction tries to spend the same input, it will fail at the very end, at that application stage. But this is the only stateful operation in the system. Now, what do the instructions look like? Think of them as Stellar operations. In Stellar transactions you have a variety of operations that you can do, but they are more like a list of things that don't really interact with each other; they just enumerate what you want to do in a transaction, for instance to send money from here to there, or open an offer, or create an account, things like that. In a similar vein, the zkVM has instructions that are high-level, a bit like Stellar's: they are high-level operations, not like assembly-language, + +[17:00] low-level operations that just manipulate bits. They manipulate values, they manipulate contracts, high-level concepts, but they are all composable with each other. So, for instance, you can instantiate a contract, then open it up, take the values out of it, move them around, maybe mix and merge them together, and then lock them in a new contract. And this sequence of operations will be not just a list of them, but will actually manipulate those values on the stack behind the scenes. So here's an example: let's say you want to make a custom constraint.
So you start by pushing some numerical value on the stack and instantiating a variable out of it, which creates the variable inside the constraint system. Then you combine different arithmetic operations on it that, as a side effect, + +[18:00] create those constraints, without actually computing anything, because the values could be encrypted. And you finish those constraints by saying `verify`, meaning you take the whole constraint expression and record it into the constraint system so it will be checked later. So it looks something like this: you have a formula, and you use this kind of stack-machine syntax for multiplying, adding, checking equality, and verifying. Then, once you have this variable all verified, you can create a borrowed value and output it in a certain place. So you say: okay, this value is correct, so I am making a payment with it. And the borrow instruction is kind of cool, because it's a zero-interest borrowing just within the lifetime of the VM: it gives you a balanced pair of negative and positive values, and the negative one has to be mixed with a proper payment coming from somewhere else, using this zero-knowledge mix + +[19:00] instruction that we call cloak. It implements a whole Cloak protocol that allows you to mix M inputs into N outputs, preserving the assets but in complete zero knowledge, so you don't know where the asset flows within the boundaries of the transaction. So these instructions show you how you can combine the value flow with custom conditions. And the important part is that this is all imperative, meaning that the contracts do not have declarative conditions; they imperatively do the things that need to be done, with linear types that cannot simply be dropped, created out of nowhere, or duplicated. So entire classes of security vulnerabilities are eliminated, whereas access-control lists always have this problem where you have a hard time combining the different access rules + +[20:00] together. Here it's very imperative, and linear types help greatly by literally moving the value to where it needs to be moved, while making sure that you don't break the rules along the way. So this is a very powerful but at the same time very safe system. So I'll say a couple of words about the cryptography that makes it all work. It's important to have, on one hand, fancy cryptography, but at the same time pretty conservative cryptography. So we have this stack: a high-performance Curve25519 implementation, on top of which we have a safe cryptographic group, with which we build the Bulletproofs system, and that allows us to do these custom constraints. And we use Bulletproofs in two ways. In one way it's the Cloak protocol that enforces the network rules, meaning that you cannot duplicate money, and at the same time it allows you + +[21:00] to do on-the-fly custom constraints. And this is cool because many other zero-knowledge systems, like Zcash, do not allow you to do on-the-fly conditions. Instead, they use a very complicated trusted setup, where several parties must decide the rules and compute some parameters in order to compose a single fixed constraint system that will be used for all the purposes of the network at once. Bulletproofs allow us to avoid this trusted setup and instead build the constraints on the fly every time the transaction is run, because this is so fast and lightweight.
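To make the stack-machine flavor concrete, here is a toy sketch, deliberately not the real zkVM API, showing how executing instructions can record constraints as a side effect of execution:

```js
// Toy illustration only: each instruction manipulates the stack, and `verify`
// records the resulting expression as a constraint to be proven later.
class ToyVm {
  constructor() {
    this.stack = [];
    this.constraints = [];
  }
  push(x) { this.stack.push(x); return this; } // push a (possibly committed) value
  mul() {
    const b = this.stack.pop(), a = this.stack.pop();
    this.stack.push({ op: "mul", a, b });
    return this;
  }
  add() {
    const b = this.stack.pop(), a = this.stack.pop();
    this.stack.push({ op: "add", a, b });
    return this;
  }
  eq() {
    const b = this.stack.pop(), a = this.stack.pop();
    this.stack.push({ op: "eq", a, b });
    return this;
  }
  verify() { this.constraints.push(this.stack.pop()); return this; }
}

// "qty * price == total" is recorded as a constraint, never evaluated in the clear:
const vm = new ToyVm();
vm.push("qty").push("price").mul().push("total").eq().verify();
```

In the real VM the operands are commitments to encrypted values rather than plain strings, and the recorded constraints are what the zero-knowledge proof is checked against at the end of execution.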
And finally, as the interface to all of that, we have these zkVM instructions, and on top, using these instructions, is where you build your applications. And this whole stack that we have is written in Rust, so everything is very neatly compiled, there are no hassles at the API boundaries in + +[22:00] between the layers, and it's compiled, without any overhead, into a single efficient binary, just like you would have with C++, for instance. Another cool thing is that the operations in the zkVM are designed to be either cheap or expensive, which sounds funny, but the point here is that the cheap operations are very cheap, it's really like a parser, and all the expensive operations are completely uniform: scalar-point multiplications in this cryptographic group. This means that they can all be deferred and done in a batch, which makes them much faster. So, effectively, you have a very quick run of the program, then a bunch of things to compute in a cryptographic group that you can batch together and do very efficiently later, and you can even imagine doing that on a GPU if you have to verify a lot of + +[23:00] transactions, and then you have an answer for whether the transaction is valid or not. So this gives us very low validation latency for the features that we have. It's less than one millisecond per output, so effectively less than a millisecond per payment, or more realistically maybe two milliseconds per payment, because payments almost always have a change output. And we use all the opportunities to improve performance that we can: if you have multiple signatures, they can all be aggregated and then batch-verified, and the transaction size is pretty decent, around one to two kilobytes, growing slowly, so if multiple parties join together they greatly optimize the total cost for the network. And so, a couple of examples of how you can build something with this. One is basic payments. The cool part is that things like accounts and balances are all off- + +[24:00] chain concepts as far as the base network is concerned; they live in APIs, and all the interesting data wrangling happens off-chain, in protocols that provide security for the parties transacting, and this greatly improves performance, because a lot of complicated decision-making is completely taken out of the consensus-critical parts and placed inside the application. This also allows you to innovate faster, because you can do different protocols and try different versions of, say, payment channels or similar protocols completely outside of the consensus-critical part, so you only have to upgrade the nodes that care about this feature, not the entire network. So, the order book is an interesting example of a smart contract. + +[25:00] It's simply a contract that allows you to unlock the value being offered for a price. This is where you use the predicate: you can pack several conditions inside the predicate. And the cool thing about the zero knowledge is that you can do this both in public, so it's like Stellar, a completely public offer with nothing secret going on, but you can also do this kind of semi-public.
So if you have an offer that needs to be enforced on the network and be auditable by authorized parties, but you don't want to publicize the exact prices and exact assets being offered, then you can do that as well, because all these formulas work on encrypted values too. And you can notice that this contract lets you give an imperative description of the conditions you want: you check that a certain quantity at the + +[26:00] price is moved to a certain address, and then you unlock the asset that you have. You never have to do this with passive checks that some field on the transaction object is set or some flag is set; in Bitcoin there are these very complicated signature-hashing flags, and there's none of that here. You can imperatively just do the things that have to happen, and if there are some negative amounts left on the stack in the VM, then the user has to provide an appropriate amount of payment to cover them. And finally, payment channels are a nice example that shows you how to do complicated state machines in this model, where you don't have global programmable state like in Ethereum. A payment channel naturally is a state machine between two parties that can make payments without settling them on the network. The only thing the network sees is that they initially deposit some + +[27:00] money and that they close the channel at some point later, but all the intermediate movements of the money are not visible. Still, the channel has to provide some assurances to the parties involved. So how does it work? If one party becomes unresponsive, then the latest signed agreement can be used by the other party as a way to exit the channel and settle unilaterally. This state machine is implemented on top of transactions: the states are embedded in contracts, and the transitions are the transaction objects themselves, which is pretty natural. The intermediate signed transactions are simply not published until you want to exit the channel. So this is how it looks in this model of inputs and outputs and transactions. Finally, to wrap it up, a little comparison with other designs, and then we can proceed to the questions; it's + +[28:00] kind of like a setting for discussion later. If you compare the design of the zkVM with Stellar, it's obviously very different but founded on similar principles. It's a multi-asset environment for modern financial protocols, and the specific difference is that the data and asset values are confidential, encrypted by default. You can, of course, make them public if you want, or do any sort of conditional disclosure schemes if you need some audit. The concept of accounts is completely off-chain, the verification of transactions is largely stateless, and your mechanism for customization is these composable instructions instead of just an enumeration of thicker, higher-level operations. Another important + +[29:00] difference is that the order-book logic in Stellar is built in and has some stateful conditions that allow more interesting multi-party price assurances. In the zkVM there is no such built-in concept; you can build all sorts of order books on top of the system yourself, depending on how complicated you want them to be. Compared to Ethereum: Ethereum is also designed for customization and smart contracts, but the model is quite different.
Ethereum is sort of like a global state machine with a lot of code, libraries of code that can call each other and do a lot of stateful changes. In the zkVM, all the changes are isolated and limited to this creation and deletion of outputs. Another important part is that the zkVM is not Turing-complete, by design. So it really + +[30:00] acts more like a parser and allows just as much composition between the instructions and data as necessary for financial operations, but not for programming just anything. And the assets are first-class types that you can move around directly, instead of being entries in a miniature bank that you have to program inside your program, as in Ethereum. This makes the whole design much easier to reason about, because you don't have to reinvent a little ledger inside the ledger every time you want to create a new asset. And finally, Zcash and Monero: they are heavily focused on privacy, and they provide some cool non-interactive obfuscation of the transaction graph, which the zkVM does not provide, although you can still do this with schemes where multiple people join multiple inputs into one transaction and sort of + +[31:00] mix them. But the zkVM provides you with the ability to have customization in contracts, where Zcash and Monero don't have any, and you even have confidential computation within those contracts. And finally, Zcash and Monero have single assets, while the zkVM is designed for multiple assets. So, as a summary, what we have here is this experiment in combining customization, confidentiality, and scalability features in a single project, using, to the optimal degree, all the latest inventions in the blockchain space, with reasonably innovative cryptography while still trying to stay on the conservative side, trying to make things very scalable and high-performance and, you + +[32:00] know, obviously pushing as much as possible to the off-chain application design, so we can innovate faster and keep the core consensus-critical parts as simple as possible. So that's all I've got for my talk today, and then we can transition to questions. You can check out the demo at idioms-demo.stellar.org, and there is a stellar/slingshot repository on GitHub where all the discussion and design work is happening daily. Thank you. + +[33:00] Oh yeah, so we received some questions. The first question is: can the zkVM be applied to the Stellar blockchain, or is it a separate sidechain? The zkVM blockchain is an entirely separate system that can work as a sidechain, or it can work complementary to Stellar, so you could have a set of validators overlapping both systems and using the same Stellar consensus protocol. So it can operate in parallel, and we have some thoughts on how to do the import/export mechanism for Stellar assets from the main chain into the zkVM sidechain. At the same time, a similar mechanism can be used for the zkVM in a private setting. Let's say you have an organization where you need a robust + +[34:00] internal ledger that is cryptographically assured and limits information exposure, and you would like to use it internally but also interoperate with Stellar. That's also a good use, because then you can protect your internal data from your own system administrators and your IT department.
So you can more precisely specify who sees what information if you run this zkVM blockchain as your internal private ledger. The next question is: can we build an automated node for a collateralized loan? How would you invoke locking for deposits, and is there an opcode, or do you require an operator in between? So this is a cool question, because a collateralized loan is one of those things that you can naturally build + +[35:00] with the zkVM instruction set. You could make an agreement where the person who receives the credit receives the money if they lock some digital asset as collateral, and this lock will have a custom predicate that allows the creditor to take out the collateral after some timeout, and also allows the person who takes the loan to unlock the collateral, provided they make a payment. The predicate of the contract will have a provision, a formula, to check that the payment was made, including whatever required interest, before the timeout. So this scheme requires you to build a + +[36:00] little bit of this kind of smart-contract program to embed in a transaction, and to build some infrastructure around it to support it, and wallets to use this feature, but then it provides both parties good security against each other. They can simply unlock the collateral by mutual agreement, but if they disagree or don't cooperate, then each party has a clause that protects them from the other. The creditor is assured that after a timeout they can simply take the collateral and sell it on the market, and the debtor is assured that they don't need to cooperate with the creditor: they can simply take back the collateral, provided they pay out the loan to a predefined address. So that's a perfect example of a use for the zkVM, and you don't need any third party to intermediate these + +[37:00] transactions; it's fully between just the two parties, without any trusted third party to mediate. The next question: are you able to provide a timeline for when you expect the zkVM to be fully complete and live on Stellar? That's a trick question, because the zkVM itself is pretty complete. What's required, which is kind of an annoying engineering problem, is to build the blockchain and networking infrastructure around it so you could integrate it with the networking nodes, and that is ongoing work right now. We have the blockchain state machine, we have the peer-to-peer networking components, but there is also the question of the right packaging, you know, a node with the UI stuff and things like that, plus the integration + +[38:00] with the Stellar consensus protocol and the integration with Stellar assets, which are two separate problems. I would like to have it as soon as possible, but we're getting out of the stage where we have to build something within the Slingshot project itself, and now, in the coming weeks, we're going to really face the problem of integrating with the Stellar consensus protocol, with Horizon, and with the import of the assets. We're working on the roadmap for this right now. This requires not just work on the project itself, but more like integration work, working with more engineers on the broader + +[39:00] Stellar side. So the roadmap is in development right now. Do we have any more questions?
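As a purely schematic illustration of the two-clause agreement described in that answer (not real zkVM code; every name below is a hypothetical placeholder):

```js
// Schematic model of the collateralized-loan predicate. Exactly one clause
// must be satisfied for the collateral to unlock.
const DEADLINE = Date.parse("2020-12-31"); // illustrative timeout
const PRINCIPAL = 1000;
const INTEREST = 50;

const loanPredicate = {
  anyOf: [
    {
      name: "repayment",
      // The debtor reclaims the collateral by paying principal plus interest
      // to the creditor's predefined address before the deadline.
      satisfied: (tx, now) =>
        now < DEADLINE && tx.pays("creditor-address", PRINCIPAL + INTEREST),
    },
    {
      name: "default",
      // After the deadline, the creditor can claim the collateral unilaterally.
      satisfied: (tx, now) => now >= DEADLINE && tx.signedBy("creditor-key"),
    },
  ],
};
```

Neither party needs to trust the other: each clause is enforceable on-chain without an intermediary, which is the point the answer makes.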
Well, thanks everyone for joining us today. We're glad you've watched this livestream, and if you would like to participate in the project, have questions, or would like to integrate it with your applications, then please find the GitHub repository, which is stellar/slingshot, and I would + +[40:00] be glad to see your issues or pull requests. Thank you so much. + +
diff --git a/meetings/2020-06-26.mdx b/meetings/2020-06-26.mdx new file mode 100644 index 0000000000..8a370adb6a --- /dev/null +++ b/meetings/2020-06-26.mdx @@ -0,0 +1,73 @@ +--- +title: "User-Friendly Key Management with SEP-30 Recovery Signer" +description: "An Engineering Talk on SEP-30, explaining how recovery signers enable noncustodial wallets to offer consumer-grade onboarding and account recovery without seed phrases or centralized key custody." +authors: [leigh-mcculloch] +tags: [tutorial, SEP-10, SEP-30] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Leigh McCulloch presents SEP-30 (Recovery Signer) as a response to one of the biggest barriers to mainstream blockchain adoption: key management. He begins by grounding the audience in Stellar’s account model—master keys, additional signers, thresholds, and weights—and explains why most wallets still rely on fragile patterns like written recovery phrases or encrypted server backups that either overload users or require excessive trust in infrastructure. + +SEP-30 reframes the problem by shifting focus from backing up a single master key to preserving control of the account itself. Instead of asking users to safeguard secret material forever, wallets manage rotating device keys while registering recovery identities (such as phone numbers or email addresses) with independent recovery services. These services can co-sign transactions during recovery but never gain unilateral control of funds, preserving self-custody while enabling familiar, consumer-style recovery flows. + +The talk walks through a concrete example using a fictional user, Alice. Her wallet generates a master key, derives a device key, removes the master key entirely, and registers two independent recovery signers with limited weights. If Alice loses her phone, she can authenticate to both recovery services, add a new device key, revoke the lost one, and regain full control—without creating a new Stellar account or exposing a seed phrase. All of this can happen behind the scenes, delivering an experience comparable to modern consumer apps. + +### Key Topics + +- Stellar’s key model: master keys, additional signers, thresholds, and why most wallets still rely on a single key. +- Common wallet approaches today: paper recovery phrases, encrypted server backups, and their usability and security tradeoffs. +- SEP-30 registration flow: wallet authentication via SEP-10, identity registration, and adding recovery signers with limited weights. +- Recovery flow: authenticating via phone or email, co-signing transactions, revoking lost device keys, and preserving the same account address. +- Benefits of the model: no single recovery service with full control, optional recovery phrases, multi-device support, and rapid revocation of lost devices. +- Why UX matters: aligning noncustodial wallets with user expectations from mainstream consumer applications to drive adoption. + +### Resources + +- [SEP-30: Account Recovery Signer](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0030.md) +- [Stellar-dev mailing list discussion on SEP-30](https://groups.google.com/g/stellar-dev/c/SFr2dHBZlsY) + +
+ Video Transcript + +[01:00] Hi everyone, welcome to the Stellar Development Foundation's engineering talk series. My name is Leigh McCulloch and I'm a software engineer at the Foundation. I'm one of the people working on improving the experience that individuals have using Stellar and that businesses have building on Stellar, and today I'm talking about Stellar Ecosystem Proposal 30. SEP-30 is an approach to making Stellar key management user-friendly in products built on Stellar. "User-friendly key management" is maybe a bold statement to make, because key management is rarely user-friendly, but that's what we're going to talk about today. First, I'm going to set some context and touch on what key management looks like on the Stellar network, how key management relates to wallets, and then some common approaches that wallets take in the Stellar ecosystem and in the blockchain space in + +[02:00] general. Then we'll jump into the approach taken by SEP-30, walk through an example, step by step, of how SEP-30 works, and then reflect on why this is valuable. We'll also have a Q&A at the end, so if you have any questions during the talk, please drop them into the YouTube chat box and we'll get to them at the end. So what is key management and why do we care? At the core of owning a Stellar account, or an account on many blockchains, is being in possession of a key. On the Stellar network, if you have an account, you have a key for that account. A key is made up of two parts: a public key that you share with others and a secret key that you keep private. This is an example of what a public key looks like on the Stellar network; it starts with a G. The public key is your account address. People send funds to this address and they'll show up in your account balance, + +[03:00] and this is what a secret key looks like; it starts with an S. On the Stellar network, we call the secret key that defines your account address the account's master key. If you create a new account on the network and don't change its configuration, your master key is what you use to sign transactions. It's what you use to control your account. So what do we mean by signing transactions? Stellar accounts have signers. A signer is a key that can authorize transactions for a Stellar account, and the master key is the default signer of the account. Being a signer of a Stellar account is sort of like being a signer on a bank account: you can sign checks for that bank account and banks will accept the checks as valid. If you get a new account on the network, it's got one signer, and that signer is the master key. But accounts can also have more signers, in addition to, or instead of, the master + +[04:00] key. A signer is a signing key, just like a master key. Accounts can be configured to let signers authorize transactions individually or together in a group. On Stellar, the configuration for what combinations of signers are required to authorize transactions is controlled by setting a threshold on the account and then weights for each signer. An example of this could be an account that has a threshold of 20 with signers that each have a weight of 10: you need two of these signers to sign a transaction for the transaction to be valid. Another example might be an account that has a threshold of one, where any signer can sign a check by itself for the check to be valid, much like a basic joint bank account. You'll hear people use the term multisig to refer to accounts.
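For reference, the threshold-and-weights setup just described could be configured roughly like this with the JavaScript stellar-sdk (a sketch only; `account` and the two signer public keys are placeholders assumed to be loaded elsewhere):

```js
const { TransactionBuilder, Operation, Networks, BASE_FEE } = require("stellar-sdk");

// `account`, SIGNER_1_PUBKEY, and SIGNER_2_PUBKEY are assumed to exist already
// (the account loaded from Horizon, plus two real G... public keys).
const tx = new TransactionBuilder(account, {
  fee: BASE_FEE,
  networkPassphrase: Networks.TESTNET,
})
  // Require a total signature weight of 20 for every operation category.
  .addOperation(
    Operation.setOptions({ lowThreshold: 20, medThreshold: 20, highThreshold: 20 }),
  )
  // Each setOptions operation can add or update a single signer.
  .addOperation(
    Operation.setOptions({ signer: { ed25519PublicKey: SIGNER_1_PUBKEY, weight: 10 } }),
  )
  .addOperation(
    Operation.setOptions({ signer: { ed25519PublicKey: SIGNER_2_PUBKEY, weight: 10 } }),
  )
  .setTimeout(30)
  .build();
```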
Such accounts use + +[05:00] multiple signers, but most accounts on the network only use the master key, and so they only have one signer. Some have used an additional signer as two-factor auth, but the control of the account still hinges on possession of that master key. So key management is how we manage all of these keys, and wallets play a pivotal role in key management, because at the core of a wallet is key management. A wallet's core job is to hold your Stellar account, and so its core job is to store your key, protect your key, help you use your key to sign transactions, and prevent you from losing your key. So let's talk about some common approaches to key management that are being used. Many wallets only use the master key. This means your account has one key, one signer, and that key is stored on the device in the wallet application. Those wallets can help you + +[06:00] to not lose your key using a few different methods. One approach is to give you the key in some form to write down or print out. The key then lives in two places: it's in your wallet on your phone, and it's on paper as a backup if you lose your phone. This is great in terms of simplicity, really straightforward to implement, and your key is backed up. But it does present some challenges. It puts a lot of responsibility on the user, who needs to write down the key exactly correctly. If they lose the paper it's written on, they've lost their backup. If someone else finds the paper, their account is easily stolen. And it can be a jarring experience to ask a new user to write down a lot of text immediately after they've installed a new application that they may not be committed to. Another approach is to store a copy of the key on the wallet server, but this has some different challenges. The + +[07:00] user has got to trust the wallet server completely, because the server will have access to the key and therefore control of their account. Some wallets address this challenge by encrypting the key using a password that the user enters into the wallet but that is never shared with the server. This means the wallet server is not able to use the key, because it cannot decrypt it. But this only works for wallets and products that require the user to make a password that won't be shared with the server, which is yet another password for the user to lose. So all wallets do key management, but most focus on managing one key, the master key, and backing up that master key. So let's talk about the approach that SEP-30 takes. SEP-30, Recovery Signer, is a Stellar Ecosystem Proposal. That means it's a proposal that contains a new standard, or a change to + +[08:00] existing standards, that is built on top of the Stellar network for use in the Stellar ecosystem. SEP-30 is an approach to key management that focuses on making key management, and specifically the loss-prevention part, as user-friendly as any other consumer application. Its goal is to work without recovery phrases, to have no server with sole control of the account, and to not require the user to remember a secure encryption password to encrypt their key. SEP-30 is currently being used in one wallet, the Vibrant wallet, which is a noncustodial US dollar savings wallet that stores value on the Stellar network. The Vibrant wallet is currently in open beta and presents a user experience designed for the general public: you log in with your phone number. + +[09:00] You don't need a password.
A wallet like Vibrant that is implementing SEP-30 still does many of the same things that other wallets do: it stores a key, it protects a key, it helps you use your key to sign transactions, and it prevents loss. But the loss it prevents is different: it prevents you from losing your account, not your key. A wallet implementing SEP-30 won't manage and back up a single master key. Instead, the wallet manages device keys and helps the user get new keys. The user's Stellar account won't change, its address will stay the same, but the user's signing keys will change. So let's have a look at what SEP-30 is. SEP-30 defines a server that provides two endpoints: one to register accounts and another to sign transactions for those + +[10:00] accounts. A wallet uses the first endpoint to register an account with the server. In the request, the wallet tells the server what identities are allowed to request signatures for the account. Those identities can be things like a phone number or an email address. The wallet authenticates using SEP-10, which is a Stellar Ecosystem Proposal that's widely used already; it defines a challenge and challenge-response authentication flow that the wallet uses to prove to the server that it possesses keys that can sign for the Stellar account. This way, the server knows the client should be allowed to define who can sign transactions for their account. In the response, the wallet gets back a signing address for the server, and it makes that address a signer of the Stellar account. The second endpoint is transaction signing. A wallet + +[11:00] calls this endpoint with a transaction it wants the server to sign. It'll do this when it has lost its key and needs the server to sign transactions for it. The wallet authenticates using an identity provided during registration. If the phone or email matches an identity stored with the account, the server will sign the transaction for the Stellar account and return that signature to the wallet. So let's have a look at an example, and we'll walk through how a wallet can use these two endpoints to support account recovery. For the example, we'll give the user who is using the wallet a name, and we'll call her Alice. When Alice creates an account with the wallet, the app on her phone is going to generate a master key, and then the wallet server is going to create the account associated with that master key. The master key will be one of those big + +[12:00] S secret keys that we looked at at the beginning, and it's the red key in this diagram. The red key never leaves Alice's device; the app on her device doesn't give this key to anyone. With SEP-30, we can implement this wallet so that there is no need for the master key to be backed up at all. Alice's wallet also generates a second key, which we'll call her device key. It's a key that is only for this device to use to sign transactions and, in the same way as the master key, it is not shared with anyone either. Alice's wallet submits a transaction to the network making that device key a signer of the account and removing the master key as a signer. Alice's wallet then deletes the master key; it's not useful anymore. The wallet then goes through the process of + +[13:00] registering the account with two recovery servers. Each server has its own key that it generates itself, and each server is hosted and controlled independently by different entities.
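As a rough sketch of the two endpoints described above, assuming an async context (request shapes paraphrased from the SEP-30 proposal; the server URL, JWTs, and values are illustrative placeholders, so consult the SEP for the authoritative schema):

```js
// 1. Register the account with a recovery server, authenticated via a SEP-10 JWT.
await fetch(`${recoveryServer}/accounts/${accountAddress}`, {
  method: "POST",
  headers: {
    Authorization: `Bearer ${sep10Jwt}`,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    identities: [
      {
        role: "owner",
        auth_methods: [
          { type: "phone_number", value: "+10000000000" },
          { type: "email", value: "alice@example.com" },
        ],
      },
    ],
  }),
});

// 2. During recovery, ask the server to co-sign a transaction after proving
//    control of the registered phone number or email.
const res = await fetch(
  `${recoveryServer}/accounts/${accountAddress}/sign/${serverSigningAddress}`,
  {
    method: "POST",
    headers: {
      Authorization: `Bearer ${identityJwt}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ transaction: transactionXdrBase64 }),
  },
);
const { signature } = await res.json();
```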
The wallet proves to each server independently that it has authority over the account by signing a SEP-10 transaction, and tells the two recovery servers that anyone who can prove possession of the user's phone number or email should be allowed to request transactions to be signed. The wallet submits a transaction to the network adding both recovery signers as signers on the account, but limiting the weight of those signers so that neither recovery server has independent control over the value in the account. Each recovery server will be able to sign a transaction at the request of the user, but the transaction will only be authorized if signed by another independent party. The only individual with + +[14:00] independent control of the account is Alice. Here's an example of what it looks like on a public block explorer on the public Stellar network. This is an account with its threshold set to 20, and there are four signers: the master key at the bottom, with a weight of zero, meaning that it has no control over the account; a device key with a weight of 20, so that the device has complete control of the account; and two recovery servers that each have a weight of 10. So let's imagine that Alice has lost her phone and she's purchased a new phone. The greyed-out phone in the top left is her lost phone, which still has her device key on it, and her new phone has no key for the account. Alice's new phone goes through the same process that her previous phone did when it signed up with the + +[15:00] wallet server: it generates a device key and it signs into the wallet. But that new device key is not a signer of her Stellar account, so her device can't authorize transactions yet. The wallet app talks to the first recovery server and asks the server to sign a transaction that makes her new device key a signer on the account. Alice then continues recovery with the second server. The third parties operate independently, so Alice authenticates with them independently, with an SMS code sent to her phone number or a link or code sent to her email address. Once Alice has authenticated, each server signs the transaction and returns the signature to the wallet. The transaction has now been authorized with a weight of 20 and can be submitted to the network. + +[16:00] The transaction removes the old signing key that was lost with Alice's previous phone and adds the new signing key that lives on Alice's new phone.
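That recovery transaction could look roughly like this with the JavaScript stellar-sdk (again a sketch; the key names are placeholders):

```js
// Swap the lost device key for the new one; the account address never changes.
// `account`, NEW_DEVICE_PUBKEY, and LOST_DEVICE_PUBKEY are assumed to exist.
const recoveryTx = new TransactionBuilder(account, {
  fee: BASE_FEE,
  networkPassphrase: Networks.PUBLIC,
})
  // Give the new device key full control (weight 20 meets the threshold alone).
  .addOperation(
    Operation.setOptions({ signer: { ed25519PublicKey: NEW_DEVICE_PUBKEY, weight: 20 } }),
  )
  // Setting a signer's weight to 0 removes it: revoke the lost device key.
  .addOperation(
    Operation.setOptions({ signer: { ed25519PublicKey: LOST_DEVICE_PUBKEY, weight: 0 } }),
  )
  .setTimeout(300)
  .build();
// Each recovery server returns a weight-10 signature; the two signatures
// together reach the threshold of 20 and authorize the transaction.
```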
This approach is also very flexible, allowing a wallet to still offer a recovery phrase that can be written down by generating an additional key to be their recovery phrase, or to allow for multiple devices where each device generates its own key that never leaves that device. This approach also makes dealing with lost devices simpler. A lost watch containing a wallet can be quickly revoked by removing its key as a signer, without needing to get a new Stellar account, and when Alice decides to purchase a new watch, it can be given + +[18:00] A new key again without interrupting any of her existing devices. But the court reason for why sub 30 really is valuable right now is user experience. That will drive adoption. So if you'd like to learn more about SEP 30, the proposal itself is about a 15 minute read and there's a thread on the Stellar dev mailing lift- without mailing lists- where you can ask questions, provide feedback or make suggestions, contributions and discussion welcome. And now we have some time for Q&A see what questions we have. + +[19:00] Okay, well, thank you, listen to me talk about subsiding. + +
diff --git a/meetings/2020-07-09.mdx b/meetings/2020-07-09.mdx new file mode 100644 index 0000000000..43ff02d9cd --- /dev/null +++ b/meetings/2020-07-09.mdx @@ -0,0 +1,178 @@ +--- +title: "Protocol 14 CAPs: Claimable Balances and Sponsored Reserves" +description: "The second Open Protocol Discussion broadcast reviews readiness for Protocol 14, focusing on claimable balances, sponsored reserves, and downstream impacts across Horizon and SDKs." +authors: + - david-mazieres + - eric-saunders + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - orbitlens + - tomer-weller +tags: + - legacy + - CAP-15 + - CAP-19 + - CAP-23 + - CAP-33 + - CAP-34 + - SEP-30 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Justin Rice opened the second livestreamed Open Protocol Discussion with a status check on preparations for Protocol 14. The session followed up on earlier meetings and focused on closing out open questions around the two largest proposed changes: claimable balances (CAP-23) and sponsored reserves (CAP-33). Participants emphasized that both proposals were in final comment period and close to acceptance, with remaining concerns centered on edge cases, implementation clarity, and ecosystem readiness. + +Much of the discussion explored how these protocol changes interact with real-world usage. For CAP-23, the group debated the structure and limits of claim predicate trees, balancing expressiveness against safety, XDR recursion limits, and ease of implementation in SDKs. For CAP-33, attention shifted to downstream effects—particularly compatibility with SEP-30 recovery signers, naming clarity for sponsorship operations, and whether certain invalid sponsorship “sandwiches” should fail earlier during validation rather than at apply time. The call closed with a broader conversation on close-time semantics (later CAP-34) and how subtle timing and sequence-number behaviors can affect smart-contract-style workflows. + +### Key Topics + +- CAP-23 claimable balances + - Predicate tree depth limits, recursion safety, and XDR implementation concerns. + - Tradeoffs between disjunctive-normal-form predicates and bounded tree structures. + - Agreement to continue design discussion on the mailing list before finalizing. +- CAP-33 sponsored reserves + - Compatibility issues with SEP-30 recovery signers and short-term mitigations at the SEP level. + - Renaming operations to clarify the sponsorship “sandwich” flow (`begin`, `end`, `revoke`). + - Consideration of whether invalid sponsorship patterns should be rejected during validation. +- Protocol 14 readiness + - Coordination with Horizon and SDK teams, including open GitHub issues for required changes. + - Working-group outcomes and the need for clearer ecosystem-facing documentation and FAQs. +- Close-time semantics (Issue 622 / CAP-34 draft) + - Edge cases where transactions can consume sequence numbers despite time-bound invalidity. + - Risks for advanced workflows such as payment channels and cross-chain swaps. + - Agreement to continue investigation on the dev mailing list before committing to changes. + +### Resources + +- [CAP-23: Two-part Payments (Claimable Balances)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md) +- [CAP-33: New Sponsored Reserves](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) +- [Issue 622: Request to change close time application](https://github.com/stellar/stellar-protocol/issues/622) + +
+ Video Transcript
+
+[00:00] Okay, we're live. Alright, everyone, welcome to the Open Protocol Discussion, a biweekly meeting in which we review and plan for upcoming changes and improvements to the Stellar protocol. We've missed a few meetings because people have been out of town, but our plan is to keep livestreaming these so that the world at large, anyone who's watching, can see what we're working on and all the wonderful thoughts that you have about how to
+
+[01:00] Make Stellar better. These meetings usually focus on reviewing Core Advancement Proposals, aka CAPs, which suggest new features and improvements to the protocol, and at the moment we're focused on Protocol 14, the upcoming protocol, which will likely include two big CAPs: [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md), two-part payments, and [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md), sponsored reserves. Today we'll cover a few remaining questions about those. We'll also talk a bit about the downstream implications of those CAPs and the preparations underway to make sure Horizon and the Stellar SDKs are ready for Protocol 14. So, we published some pre-reads for this meeting; they're in the event description, and this discussion will make a lot more sense if you've done them, so do those if you haven't. There's also a rough outline of the agenda on the left of the screen, and there's a list of participants in the event description. And for this meeting, instead of introducing everyone, I'll just let anyone watching Google the names to find out who's who. If that's not working, in the future we can take some time to
+
+[02:00] Introduce ourselves. But let's start there; it seems simplest. So, diving in. Quick status update: [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md) and [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) are in final comment period. They've been there a little longer than usual; there were a few lingering questions, and discussion was a bit slow because people are away. But barring any blockers raised in today's discussion, we will be knighting them with the coveted status of accepted. So first let's talk about those. [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md), two-part payments. [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md) creates a new ledger entry called a claimable balance, and new operations that allow you to create and claim a claimable balance. The goal is to separate the sending and receiving of a payment, so you can send a payment to an account that isn't already prepared to receive it. It looks like there are a couple of issues here, and I will turn it over to John to talk about the first one. Sure, so this actually originates from a question that David brought up, I guess, a few weeks ago, which has kind of
+
+[03:00] Been lingering. But basically the question is: right now we're using this predicate tree structure, which is kind of novel in the Stellar world. You know, in other places we use linear threshold combinations and stuff like that, but here we have this predicate tree. And David's main concern about this predicate tree is that, you know, XDR doesn't have any built-in mechanisms to prevent arbitrary recursion.
+And so anybody who has an XDR parser, first of all, as a matter of principle, you should be making sure that you're not gonna get infinite recursion. But you could imagine a world in which you have some structure which is sitting on the edge of the recursion depth limit for different nodes on different architectures and whatever, and maybe some nodes decide it's acceptable and some nodes decide it's unacceptable, and then you end up in some bad, undefined state, potentially a fork in a really bad case, depending on, basically, just how everybody's XDR is configured. And so the question is
+
+[04:00] Like, how should we update this predicate tree structure? And David had this proposal to basically turn it into disjunctive normal form. But I've heard some opposition, from myself, from Nicolas, and from somebody else that I can't remember right now who the third person is, that this disjunctive normal form idea is not really that great, because complicated expressions end up becoming exponentially large. That's kind of not ideal. So we could either decide that we don't think this is a problem and we're not gonna be anywhere close to the recursion depth limit, because realistically a transaction, even with this predicate tree structure, has recursion depth maybe like 10 or 12, and if your system doesn't support 12 recursions, that's a problem on its own. The idea is that a bad person could purposely submit a transaction that pushes the recursion depth, right? But that would be
+
+[05:00] Invalid no matter what. They're automatically invalid if the depth is greater than four, because we have logic in there too. Actually, it's literally invalid if any of the predicate trees have depth greater than four. So, you know, if your XDR compiler doesn't generate code that handles deep recursions well, it might crash your node, but if that's the case, your XDR setup is garbage and you have a security vulnerability regardless of whether we've done this or not. Well, if we pick a number like four, we can actually, and admittedly it's ugly, express that in XDR: predicate tree one, two, three, and four as four different types, you know, leaf one, two, and three or something. But the question is: do we really actually have use cases that involve all of this? The thing is, you already have the disjunction, because you can have multiple claimants, right? And so the question is, why not do
+
+[06:00] Something that has just a conjunction, you know, in the claim predicate, and then we can add something more if it turns out that's not useful enough, or at least come up with some actual, realistic use case that actually requires, you know, four-deep nesting of ands and ors. So the four wasn't chosen arbitrarily. The four has this very magic property of making a claimable balance entry, or rather the claim predicate tree, the same size as a data entry. So basically, the four was saying: if you're gonna pay a reserve to get a data entry, you might as well pay a reserve to get the same amount of space in a claimable balance. So that's where the magic four comes from.
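+The "four different types" idea above can be illustrated with a small type sketch. This is purely illustrative TypeScript; the names are hypothetical and the actual CAP-23 XDR may differ:
+
+```ts
+// Sketch only: bounding predicate depth by construction rather than by a
+// runtime recursion check. Leaf variants are simplified placeholders.
+type Leaf =
+  | { kind: "unconditional" }
+  | { kind: "beforeAbsoluteTime"; epochSeconds: number }
+  | { kind: "beforeRelativeTime"; seconds: number };
+
+// Each level may only contain the level below it, so the maximum nesting
+// depth is four and a parser can never be driven into deeper recursion.
+type Depth1 = Leaf | { kind: "and" | "or"; children: Leaf[] };
+type Depth2 = Leaf | { kind: "and" | "or"; children: Depth1[] };
+type Depth3 = Leaf | { kind: "and" | "or"; children: Depth2[] };
+type ClaimPredicate = Leaf | { kind: "and" | "or"; children: Depth3[] };
+```
+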
+But regardless of that, for me it's not just about whether everything sensible could be expressed in disjunctive normal form; I don't see any
+
+[07:00] Reason why you couldn't do that. The issue is, if we then subsequently add the conjunctive stuff, then we're no better off than we are now. We'd be strictly worse, in fact, because we'd now have the disjunction thing, which was your proposal, where basically you can list the same claimant multiple times, which is currently not permitted at all; claimants are unique in the current proposal. But then you'd also be able to build these trees, and then you've kind of violated the "one way to do it" rule. So that's kind of my main take on it. One other idea I'll pitch, and I'm gonna credit this one to Graydon, it's not my idea. But Graydon mentioned that, to avoid the infinite recursion thing, we could actually XDR-
+
+[08:00] Opaquify the predicate tree, and, for example, make the XDR opaque version of the predicate tree 64 bytes right now, and if it's bigger than 64 bytes, you're dead; that's not gonna work. And then later, if we need bigger trees, for example for hash preimages or something, we just increase that number. But that provides a default bound automatically. I don't know how much anybody cares about this. At the end of the day, we could definitely do the hard-coding thing. There's a lot of different options, and I just don't know how much it matters to spend time trying to decide. Do people care a lot? Are people really concerned about this recursion thing? What is the use case that's driving the more complex predicate trees? Well, the main reason for having the predicate trees is just that it's a lot more powerful than linear signer weights,
+
+[09:00] Basically what we have on accounts. Like, there are very simple situations where you would want an or, for example, or maybe even an and of an or. You might want a situation where it's like: from now until this time, I can do something, and then from some future time until some other future time, I again have the right to do something. And now you've automatically got this tree structure implicitly. But no, you can already have this, because you already have multiple claimants, right? So, claimants are unique right now; they don't support repetition, but we could do repetition. Sorry, but, oh, you're saying even the sub-units of the claimants? If you have a conjunction of "me before Tuesday" and "me after Thursday", you're saying that's the same claimant appearing multiple times? Correct, and that would be invalid. So, yes. I don't see why you would prohibit that. I can actually see a number of reasons why you might want to do that, because maybe
+
+[10:00] You want to be able to be claimed by, like, a two-out-of-three configuration, and so, you know, I should be able to sign on two of the claimants, for example. So having repeated claimants actually seems like a very useful thing, unlike this alternating and and or four times.
+I'm not sure I understand why the repeating would be useful other than in this disjunctive normal form world. Like, what usage could possibly come from that? It's strictly less powerful than the tree: anything you can do with repeated claimants you could also do with the tree, but not the opposite. Anyway, I want to come back to this; I don't want to spend time on it if nobody's concerned about the recursion issue. My stance on the recursion
+
+[11:00] Issue is that if the recursion issue comes into play, you already have a security vulnerability, because your XDR compiler doesn't handle recursion well, or your XDR library doesn't handle recursion well. But the question is: do we define a depth specifically for this data structure, or do we just expect it to conform to whatever your XDR parser knows how to handle? There are two parts to it. Yeah, there are two parts, right. From a security standpoint, you should never use an unbounded amount of memory when you get untrusted data on the wire, right? And then there is the second part, which is a protocol-enforced thing: what is the maximum depth? And it's four. In that case, if that's really what we're gonna do, I would argue against the maximum depth, because that's hard to
+
+[12:00] Implement, right, so people get it wrong. Do we have a maximum transaction size? Because a maximum transaction size would at least be easy to enforce. I'd rather not go that route, because right now, like John said, the four has this kind of nice property where we don't have to deal with a variable base reserve requirement. As soon as we say we bound this to the size of the transaction, you have to come up with a pricing model for how much to charge for the reserve, because now there are potentially a lot more predicates that you can put in, and operations can get very big. So how big does a bad
+
+[13:00] Payment get? You know, it can get very big. So how big is your implementation of the recursion depth check, John? How many lines, I suppose? Siddharth did it, so I don't have it in my head right now, but my guess is it's probably on the order of 20. That's how much I think it would take me to implement it. And how many places invoke that function in the code? Because it seems like we're gonna have to check this in a bunch of different places. No, it's just one. Yeah, it's very easy, because they're immutable. So it's just, in the validity path, you check: hey, how deep is this? Cool, you're good. And so we need basically a version in every SDK, and then we need to check that the two are equivalent, right? It seems like any time an SDK can be convinced to create a transaction that Stellar Core will consider incorrect or invalid, that's it,
+
+[14:00] That's exploitable, right? That's it, that's exploitable, because I'll convince you that, hey, we're gonna do this thing where you can claim a bunch of money or whatever, and then it turns out that you can't, because Stellar Core rejects the transaction. So I really don't like this idea.
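+For concreteness, the depth check being debated really is small. A minimal sketch, assuming an already-parsed predicate tree with hypothetical types (not Stellar Core's actual code):
+
+```ts
+const MAX_PREDICATE_DEPTH = 4;
+
+interface PredicateNode {
+  // Present for and/or/not nodes; absent or empty for leaves.
+  children?: PredicateNode[];
+}
+
+// Returns false as soon as any branch exceeds the allowed depth.
+function predicateDepthOk(node: PredicateNode, depth = 1): boolean {
+  if (depth > MAX_PREDICATE_DEPTH) return false;
+  return (node.children ?? []).every((c) => predicateDepthOk(c, depth + 1));
+}
+```
+
+Because claimable balance entries are immutable, a check like this would only need to run once, in the validity path, as noted above.
+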
+But, like, yeah, I don't think we should never do it. There are clearly times it's appropriate, but it just doesn't seem justified here to add an extra validity rule. I think this is not a good argument, because, in general, you have an expression and you're saying, oh, I want to look at the expression and I expect it to just pass in Core. But really, if this expression is bogus, it doesn't matter; it's going to be invalid later, right? So maybe. The reason is, we should make it as easy as possible for SDKs and
+
+[15:00] Other software in the Stellar ecosystem to replicate the validity rules of Stellar transactions. And obviously sometimes we need to add extra logic. But given that we haven't cited a single use case that wouldn't be satisfied by a simpler approach here, it just doesn't seem justified. But again, I've said my piece. I don't object strongly enough that I'm overruling this; I did vote to put this to FCP, so you guys can heed my advice or ignore it. I guess, how would you feel about the hard-coded thing, the, you know, leaf one-two-three thing? Better, much better, because that would be easy to do, and it wouldn't really impact the implementation at all. Because all I would do in the implementation is just parse it once the real way, the depth-four way, and then, once I check that it's fine, I would just parse it again as an infinitely recursive structure. And then, you know,
+
+[16:00] SDKs and whatever could just parse it as the four-depth version, and it would just work out of the box. Yeah, we would not even use the one with the labels in Core. Yeah, we might not even use the one with the labels in Core, exactly. That's a good point, because we know that we're secure against arbitrary recursion. No, but the whole point is we want the logic in Core to exactly match the logic in SDKs. Right, so, well, I mean, the specification is maximum depth four, right? So it would be the same spec, and there would be a canned version in the XDR. I'm very skeptical that the four is even necessary. Why don't we start with two, and then, if we need four, we can do it? Yeah, we could start with two, that's fine, but two doesn't help you a ton, in the sense that two has exactly the same
+
+[17:00] Constraints as four, right? You'd still need to check the depths and everything. Well, no, because two is much less unwieldy to deal with, like a leaf and an internal node; it's not that bad to have two different types for it. Also, well, I guess, do you mean as in root one? No, I just meant when you have an and or an or, it's an and or an or of something that doesn't include an and or an or, of a leaf. That's a lot more restrictive. But again, what are the use cases here? This feels to me like we're building a mechanism without an idea of the use case for the mechanism. Right, and you can always increase the depth, right? Oh, yeah, that's a fair point. You could always increase it, you know.
+
+[18:00] On the other hand, it may turn out that the main bottleneck here is that it may be very unintuitive to have complicated boolean formulas of ands and ors for this stuff.
+Right, like, I think a simple one-level thing, a disjunction of conjunctions, is something that really makes sense: okay, there are three conditions under which we can do this; it's "this and this are true, or this is true." But once you start going to more than depth two, it starts becoming fairly unintuitive. It's what you'd expect to feed into your SAT solver because you've generated it from whatever, but it's no longer gonna correspond to simple human expectations. So do we actually expect that we're gonna have compilers generate these really complicated predicates and that we're just gonna trust those compilers to do something sensible? Or, if we have a use case for that, I'm okay with it, but until we do, I'd say, why
+
+[19:00] Not just defer having that complexity? I mean, I'm not opposed to going to two in the short term and making it super simple, and we can make it higher in the future when we need it. Like, would that be difficult, if we started shallow and decided that it needed to be deeper? Would it be difficult? No, it'd be very easy. I think that I have an idea how to implement this without the recursion. Maybe we can just allow the predicate to be an array of predicates and allow duplicate claimants in the conditions. With these two preconditions, we can implement both the and and the or cases, because duplicate claimants, for
+
+[20:00] Example, two or more claimants with the same account ID, with the same destination, will allow us to implement the or cases. And making the predicate an array of predicates makes the whole thing easier. It's intuitive, it makes sense, it is fairly expressive. Clearly there's a lot of different opinions here. So let's take this, I think we've talked out some of the details, okay, take it to the mailing list and try to come up with what's the right thing to do. The beautiful thing is, none of these changes are going to be very material to the implementation, so we can plug in whichever one we think is the right one, but I don't want
+
+[21:00] To take the entire time of this meeting talking about this one thing. Okay, so the plan then is to make these suggestions on the mailing list, evaluate them asynchronously, and choose one for the implementation soon. Okay, that's what we'll do then; email after this meeting kicking that off. Cool. Is there anything else that we need to cover on [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md), or is that the procedure forward with that? If people want to talk about the other thing, about changing this before-and-after notion to having a "not", we can do that, but we can probably take that to the mailing list; they're all kind of bundled up as one topic. So unless anybody really wants to talk about that right now, I'd say let's take it to the mailing list. Great, take it to the mailing list; look out for that in your inbox. Everyone get excited. So then we'll move on to
+
+[22:00] CAP-33, sponsored reserves. This proposal allows an entity to cover the reserve for accounts controlled by other parties without giving those parties control of the reserve.
+It extends account entries and ledger entries so they record pertinent information about sponsorships; it creates new operations to initiate and terminate sponsorship and to update sponsorship information for existing ledger entries. The goal is to allow asset issuers and wallets to cover user reserves. Basically, one issue came up, which is that there was some incompatibility with SEP-30, which is a Stellar Ecosystem Proposal that defines an API to allow users to regain access to a Stellar account after they've lost their private key, without giving any third party control of that account. And so there's been some investigation of how to deal with potential incompatibilities, and I'll turn it over to you. Yeah, I can give just a quick description of what the incompatibility is, and then we've
+
+[23:00] Got a short-term fix at the SEP level, and then John will talk about a potential way we might want to think about this at the protocol level later on as well. So the incompatibility is that a server that implements SEP-30, because it signs transactions for multiple accounts, has this rule defined in the SEP that says the server should only sign a transaction if the source account on the transaction and on the operations is the account that's registered, the account it's signing for. And the latest version of [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) has things like adding signers, including other operations that have to be signed by the sponsoring account. So because this transaction that adds a sponsored signer has multiple accounts signing it, a SEP-30 server would reject it. So the short-term fix that we're thinking of
+
+[24:00] At the SEP level is just to alter that rule, so that a SEP-30 server implementation can choose to allow the source account of operations to include some limited set of accounts that it knows it won't be signing transactions for. That would let a server add the sponsor to that limited list and would allow those transactions to be signed. And John also has an idea of how to deal with this in another way, at the protocol level. Do you want to go? Yeah, I wasn't sure you were done talking; didn't want to interrupt you. So this harks back to a super duper old issue actually opened by Jeremy Rubin more than two years ago. It's protocol issue number 93, if anybody's interested. But
+
+[25:00] Basically, Jeremy's observation was that signing transactions in the world of Stellar is very susceptible to the confused deputy problem. And the basic idea behind the confused deputy problem, for people listening, or at least how it applies in the world of Stellar, is: suppose you have two accounts, both of which can be signed for with a single signer, a single private key. And then you look at some transaction, and somebody asks you to sign it for one of these two accounts, but what you don't realize is that it actually contains operations for the other account as well, and you've now signed it for both accounts and it's good to go, and you might have authorized something that you did not mean to authorize. There are other manifestations of this as well.
+For example, it could be a more complicated world where you have multiple keys, and somebody asks you to sign for a certain set of keys, and those keys combine to sign for some third account that you didn't realize you were signing for. So at the
+
+[26:00] Time Jeremy opened this issue, there was no way to implement a fix, which was annoying, because there was no way to change signatures. But since David's [CAP-19](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0019.md), which got subsumed into [CAP-15](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md) and introduced the new transaction envelopes, we actually have the power to change signatures in a very clean way. And so the gist of my proposal is basically to add a new kind of transaction envelope, which we'll call, for the sake of this discussion, "transaction envelope no confused deputy", but that's a terrible name; we wouldn't actually call it that. I'm no good at names; somebody else would probably think of one. Basically, this transaction envelope would take the normal transaction, but it would require a different type of signature payload, so signatures for an existing transaction, like a transaction v1, don't work here. But these
+
+[27:00] Signatures would basically have a new bit field wrapped into the signature payload saying which operations you're signing for, and also whether you're signing for the source account. And then, when you provide the decorated signature, you provide that bit field again, and since the signature is over that bit field, you know that you've gotten the right data. When you verify the signature, it contains that bit field, so you know whether you have it or not, and then you just apply the signature weight to the operations, and potentially the source account, that matched that bit field. So this would be a pretty clean approach, and you wouldn't be signing for every single thing in a transaction anymore. It's pretty neat. It'd be some work to do, and it would require overhauling a lot of the Stellar Core signature processing stuff, but I don't think it'd be breakingly challenging. But to be clear, the idea would be to try to proceed with [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) now and work on these changes later. Yes, exactly. I mean, from my perspective, as long as
But yeah, I think we there's no reason to change that [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) because of this problem. I think what John is saying is right. This is an underlying issue that we should address on the time and we do have a way to move forward with this up with sub 30 we have 33 together anyway. + +[29:00] Alright, it looks like there's one other issue here about changing sponsorship checks. The truth. Sorry, I was trying to remember what the what I from applying to the validity. Yeah, so, right now, the way that the [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) is written, the way that I've been implementing it, like we would accept transactions that have like, for example, like bad sandwich egg, but these transactions are 100% guaranteed to fail at the time of apply. You don't need like. Normally, when we do validity checks we're doing them on a single operation level. But like the reason for that is normally just because operations have side effects and we can't necessarily tell what they are. But for all of these sponsorship operations, like for beginning and ending in the sandwich King, like you don't need to do + +[30:00] Anything to note. Like I can look at a transaction and tell you if it's properly sandwiched without applying any of the intermediate operations. And so the question is whether we should allow these transactions to make it to apply time and then fail at that point for being improperly sandwiched, or whether we should just say like no, your transaction stupid and it's invalid. Doing that would probably require some new machinery, but like it's not conceptually hard at all, it's just new stuff to build, whereas doing it at apply time is very easy, which is why I took that approach, but it has the disadvantage of allowing people to spend money on stuff that and also spend time on the network with stuff that's like obviously not going to work. So I don't know, people have a strong feeling about this. I know I've discussed it with Nikolai before, but I don't know whether he- what I don't really know which way he wants to go with that really cold. It's worth. I + +[31:00] Would have thought we would check this kind of thing in the SDKs as well, even possibly in Horizon. I mean actually thought about it, but, as you say, it seems trivial to check. I mean like, yeah, like for me it seems like it would be a good candidate to have this as the validation step, because it- you know, I'm trying to- its a usability thing. I think. That said, you know if we think that some bitching is more like remote in kind of a niche thing, maybe it's not worth it. I mean, either way is fine. To me it's more like, yeah, trade off of were classes, + +[32:00] Because we can always change that later on to, so it's not like that's exactly, is gonna say that we always have the power to change this later. I also don't think it makes a huge difference to usability, because somebody's running these types of transactions is probably gonna be running a lot of these transactions, so they're gonna find out really early in development. Like they're not kind of- hopefully, I hope, I think I'm likely- then they kind of run like 10 000 of these transactions and then see them or fail. They're probably going to run one during testing. See a fail, fix the code. and then move on. Then there is a strong point. This is like a kind of like an enterprise tool, not really an individual user tool. 
+Yeah, and if it is a user tool, it's probably not gonna be at that kind of scale. Okay. So
+
+[33:00] It seems like the answer is: don't worry about that. Great. Are there any other comments, questions, or suggestions about CAP-33? Actually, as I mentioned earlier, I'm a bit worried about the locked funds clawback logic. Without it, this proposal seems incomplete, and the sponsoring entity has no means to recover its funds from abandoned sponsored accounts. We agreed to sync on this later, so, just for the record, I think it's important to articulate this with respect to the timeline for this feature.
+
+[34:00] Like, this year the desired feature list is fully packed, but next year we'll definitely introduce some way to deal with such situations, because companies that are willing to sponsor reserves for their client accounts must have confidence that they will be able to claim the locked funds in the future. I think it's important. Well, if you have some ideas about how to do this, I would definitely welcome a proposal; I definitely think that would be valuable. I haven't really spent any time thinking about how to do it, other than the fact that it's probably possible with the current design, albeit probably not that easy. But yeah, if
+
+[35:00] You have one, definitely submit a proposal; I'll definitely look at it. I've spent a ton of time thinking about this; I have lots of context on it. Sure, okay, great. I mean, I feel like, even though there's still a little bit to be resolved on CAP-23 and CAP-33, it's feeling like it's in a pretty good spot, and obviously these are things that we're intending to include in Protocol 14, and already the Horizon team and people that work on the SDKs have started to think about what that means. And so I know Eric put together a working group to go through some of the issues and figure out some decisions, and I think he's going to give us a summary of where they're at. Yeah, I don't have a ton to say, except that we had a lot of discussions. We went
+
+[36:00] Through and tried to think about use cases, and then we tried to think about how this stuff would actually work, and we came up with problems in the CAPs, and we went back and modified the CAPs, and then we came back again. We went around the loop a few times; it was really useful. I've linked our discussion doc in the agenda, which you can look at to see the gory details, and there's an FAQ coming out of this, which will be helpful for the ecosystem, I think. And the outcome now, I think, is that we have a spec that's pretty solid. There are two issues in the Go monorepo, 2787 and 2788, that you can have a look at. They describe the endpoints that we expect in Horizon and the work that we expect to do in the SDKs, and anything remaining that's unclear, I'm kind of satisfied, is implementation
+
+[37:00] Problems: how do you put things in, what tables, that kind of stuff. So I'm pretty happy with it. The only other thing to mention, I'm just going to mention this briefly, I know they didn't really want me to, is that there is a naming question in [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md): whether we should rename these operations.
+There's something clearer, because one of the things that we ran into in this discussion was that we kept getting confused about what certain operations actually meant, and if we're confused, then everybody else is going to be confused. This is relevant to Horizon, because we have to name our SDK functions and our endpoints according to what we decide the actual operations are. So there's a mailing list discussion about that. I'm hoping we'll get agreement on it. I like the new names; I think we should keep them, but we'll need that in order to actually make the final implementation. What are the new names, and why are they new? Sure. So the originals were
+
+[38:00] Sponsoring future reserves, confirm and clear sponsorship, and update sponsorship. The new proposed names are begin sponsoring future reserves and end sponsoring future reserves, so it's really clear that it's a sandwich. And then the third operation is renamed from update sponsorship to revoke sponsorship, because that third operation is signed by the person letting go of a sponsorship, letting go of paying the reserve for the account. Yeah, I agree with all three of those, particularly the third one. Yeah, that seems nice and clear. I mean, there are a lot of letters in some of those names; we're gonna hit our 80-character limit on the lines of the
+
+[39:00] XDR file after, like, one word. So, as far as you can see right now from your point of view, no surprise database migrations? It looks good to me. Yeah, I'm pretty happy with it. Cool. Any other questions about prep for Protocol 14 downstream, or about whatever? I guess it's worth mentioning as well that I expect us to begin implementation of this in our next sprint, which starts in about a week and a half, unless something comes up to dissuade us or asks us to spend more time. So those pending decisions about [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md), I don't think they have an impact on that timetable that's significant. That's more
+
+[40:00] Exciting, Eric; hopefully I'll have something decently functional for you guys to test off of by then. Yeah, when you can, get me a Core build. Okay. It looks like there's one final small issue, which started as issue 622, change close time semantics. I think it's now [CAP-34](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0034.md), which is in draft. Do you want to just summarize that real quick? Yeah, the CAP is not yet in good form; it's really rough. So I'll just focus on describing at a high level what the CAP is about. We had a bunch of ideas going around in the issue itself, and this is an annoyance in the current protocol. So this is not impacting the SDKs or Horizon. In fact, the only
+
+[41:00] People that are getting impacted are going to be impacted in a positive way, which is people that write transactions in the context of smart contracts that have expiration times. Basically, we're closing an edge case that nobody is probably aware of when they write those smart contracts: that today a transaction can fail during consensus time, basically consuming the sequence number even though the time bounds that were specified would make the transaction invalid.
+So basically, the typical scenario with smart contracts is you have mutually exclusive transactions on a time bound. One is, for example, only valid before a certain date, and the other only after, and those are
+
+[42:00] Your two branches of execution, basically. And here, what we have is, it's possible that the first one, which is only valid before a certain time, would be accepted for consensus, and then you would find it failing during consensus, because the close time that was picked during consensus is actually incompatible with that transaction. This is very annoying, because if you have this one transaction failing in your branch, well, normally what people do is they chain transactions with hashes, right? And now that particular path basically becomes impossible, because you consumed the sequence number but you didn't actually execute that one transaction. So that smart contract is gonna be in a broken state. So it's
+
+[43:00] Really an edge case. And because it's an edge case, yeah, we want to get rid of it anyway. So the fix for it that we came up with, with David, is basically this: during consensus, pick as close time the one that is associated with the transaction set. So normally, when nodes nominate a value, the value is made of a transaction set, a close time, and upgrades. Right now, what we do at the end of nomination is combine all those values into some more interesting value, that is, a transaction set that is one of the transaction sets that made it to the end of nomination, and then a close time that is the biggest close time.
+
+[44:00] So instead, what we're going to do is preserve the affinity of transaction set to close time, and by preserving the affinity, what we can do is actually change the validity criteria for the pair of transaction set and close time, to only allow transactions that are not going to be expired with that given transaction set. So basically, it's pushing the burden onto the nominated value. That sums up the actual change: we will basically not even include, in the transaction set, those transactions that would expire between two ledgers. That's the gist of the proposed change. So it's not a big change, but it potentially
+
+[45:00] Can be life-changing for those people that may hit that bug. Do we have actual use cases that are running into this? So, we see those transactions fail today on the ledger, so people are running into it; it has definitely happened before. Whether it happens in contexts that people know about, we don't know, and we don't even know for sure about contracts that didn't execute yet, right? That's the problem with those things: they are pre-signed, so we don't know yet. So, here's my concern. I agree this is a problem, and I think there are a number of ways to fix this problem, including this one, which seems like a plausible way. Another way would be the kind of signed-operations proposal that I made on
+
+[46:00] The mailing list.
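+A toy sketch of the affinity change just described, purely illustrative (stellar-core's nomination logic is far more involved, and these types are made up):
+
+```ts
+interface NominatedValue {
+  txSetHash: string;
+  closeTime: number;
+}
+
+// Roughly today: one transaction set wins, but the close time is the maximum
+// across all nominated values, so the pairing between set and time is lost.
+function combineOld(values: NominatedValue[], winner: string): NominatedValue {
+  return { txSetHash: winner, closeTime: Math.max(...values.map((v) => v.closeTime)) };
+}
+
+// Proposed: keep the close time that was nominated together with the winning
+// transaction set, so transaction validity can be judged against that time.
+function combineNew(values: NominatedValue[], winner: string): NominatedValue {
+  return values.find((v) => v.txSetHash === winner)!;
+}
+```
+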
+But the problem is that all of these things eliminate some edge cases and introduce other edge cases, and so I'm just a little apprehensive about doing it sort of in the abstract. So, for example, this new proposal would definitely fix the edge case where, you know, you've got a payment channel, and you submitted something and it expired, and you would have submitted something else otherwise, and now your sequence numbers are messed up. On the other hand, something that would work today, if someone is doing atomic cross-chain swaps, with this proposal, if there's a network outage, you could actually lose money. Right, because, no, this is fixing a niche case; you're guaranteed that the transaction set will not
+
+[47:00] Exclude transactions that would succeed. So, okay, either I'm misunderstanding it or you're misunderstanding me. Either way, it's an indication that this is an edge case. So imagine that what we're trying to do is swap lumens for bitcoin, right? What I need to do is claim my lumens, and in order to claim my lumens, I'm gonna use a hash-X signature, and by disclosing that, someone's gonna be able to claim their bitcoin, right? And I have to do my thing by a certain deadline, and then the bitcoin person has an extra hour to take the preimage that they've found in the Stellar transaction and use it to claim the bitcoin on the Bitcoin network, right? So under this new proposal, if the Stellar network goes down for an hour, it's possible for an operation to actually execute even though it was supposed to execute at most one hour ago. Right, and so, you know, again, this would require the nodes on the network getting a
+
+[48:00] Little bit lucky with the timing of an outage, but if you're trading enough bitcoin for lumens, that's something you'd be concerned with. So again, I'm not saying it's a bad idea, but I'm saying it potentially causes other problems. I mean, what you're describing already exists, David. Yeah, this problem already exists; we messaged Justin about a very similar problem this morning, like two hours ago. Like, right now, imagine this super terrible thing happened. Imagine that a bunch of us submitted transactions, you know, me, Nicolas, David, Leigh, OrbitLens, Justin; we all made a bunch of transactions. And Eric also submits a transaction, but Eric's name starts with E, and that means that he's malicious in conventional crypto stories, and Eric knows of some zero-day that allows you to crash the network at externalize time. And so what happens is, Eric's transaction gets into the ledger, all of our transactions
+
+[49:00] Get into the ledger, and now the network is dead; it crashes during ledger close and nobody externalizes. As a consequence, everybody crashes. Now, when you restart the network, it's gonna crash again, because you are committed to this transaction set. So the only thing you can do is update the software. Let's say this takes like 36 to 48 hours to get everybody back up and running.
+It would be, I think, a reasonable guess, maybe even a conservative guess. But the point is, you should not consider any transaction expired on the Stellar network until you've seen a close time past its expiration. Once you've seen a close time past that time, you can say that transaction is expired, but until then, you cannot. Because, no matter what model we use, it's possible that the difference between the wall-clock time when the ledger actually externalizes and the Stellar header close time could be arbitrarily far apart. So this problem exists today. If you get into a
+
+[50:00] Situation like that, a transaction might execute up until that ledger's close timestamp. I think this is a different problem. So, this is also a problem, but the problem I was concerned with is that a transaction with an older timestamp might not get disclosed until long after that timestamp, right? And if the contents of that transaction unlock something on a different blockchain, then that's a problem. Can you give some more background on this? I'm not sure I understand how that would happen. Sure. So, basically, we propose a block with a particular time, right, and that time was an hour ago, because, and in my case you don't need a software fault, it could be some botnet attacking Stellar
+
+[51:00] Or something, right? So it takes us an hour to recover from this thing, and now we're going to execute transactions that are an hour old, because it's still the biggest block that's been nominated, right? Even though, at this point, everybody should be nominating everything. But it could be that the block from an hour ago was the biggest block and nobody's heard of newer transactions, right? So, even though other people should be nominating higher timestamps, we could well end up executing an old timestamp, and so people might learn about a transaction with a timestamp of 11:00 a.m. at, like, 12:00 p.m. But shouldn't you only vote for a transaction set, or a Stellar value, that has a timestamp that's close to your clock time? So if everybody thinks that value is super old, nobody should vote for it, because it doesn't have a good close time. No, you
+
+[52:00] Should vote for anything any one of your leaders is voting for, but only if it's valid. And should we consider a value with a super old close time valid? Do we even do that today, vote for a value with a super old close time? Yeah, that's my understanding. I don't think that even happens today; I think that if it's older than, like, 60 seconds, it's dead. Nicolas would know best, so let me let him take it. It can be any clock time between the last ledger and the current time; all of those can be introduced as potential values, but we pick the highest anyway. So my point is that, no, I don't think that would change; the likely close time to win in that situation is going to be appropriately
+
+[53:00] Current. Oh, okay. Hold on, that's confusing, actually; that's not what I inferred from the proposal. You nominate a close time with the worst transaction set? No. So, when you have nodes nominating a pair of transaction set and close time, the close time that a node introduces on the network is its local clock time, right? So, actually, if the network was out for an hour, you know, during the last
+Ledger, that new ledger will be an hour later, obviously, compared to the prior one. So it's actually snapping to the current time. It doesn't, but a malicious node could say that it nominated something with an old time, right? Is there anything stopping them from doing that? Well, there's a race, right? A DoS
+
+[54:00] Can happen at any node; any node can be DoSed, right? So any node can introduce a close time. So in this particular situation, I guess the change is, you may end up picking the transaction set and close time pair from one node that actually decided it was not going to nominate something close to the current time, but instead something in the past, though still bigger than the last close time. I just wanted to say, by the way, that we're fast approaching the end of the hour, so I don't necessarily want to cut this discussion off, but at some point I think we'll stop the livestream and figure out the best way to have this discussion move forward. Yeah, so basically what I would like to do is, I would like to either have, you know,
+
+[55:00] A little bit more data about the sort of smart contracts people are running, just to make sure that we're not solving one problem and creating another, and, second of all, maybe a little broader discussion. Because the point is, these sequence numbers are actually annoying in more ways than one, and so it could be that if we solve the sequence number problem, then this issue goes away and we haven't had to also create this potential annoyance for atomic cross-chain swaps. So, there is already a way to avoid this sequence number problem; I actually described it in issue 622, like three or four hours ago. But the basic concept is, instead of letting your transactions have sequential sequence numbers, they have gapped sequence numbers, where you can choose a gap of one, for example, and then the transaction just needs to have a bump sequence, and so if it fails, you know that it didn't happen. And that would solve a whole bunch of other problems
+
+[56:00] Too. So I'd way rather just address that problem, rather than do this one thing that's targeting this one very specific problem, which we don't even know is exactly the problem that people are having in practice. We know transactions are running into these failures regularly. No, all we know is that some transactions are getting submitted that have a bad close time, right? But we don't know that it's messing up people's smart contracts, and those people may not care that they're losing a 100-microlumen or 10-microlumen fee or whatever; it just might not be a big deal, right? So you want to put a face to those transactions. Yeah. There'd be so much upside to actually allowing the gapped sequence numbers, because that would solve all these other problems too. So, what we are talking about here is that there might be smart contracts out there that are
+
+[57:00] Broken because the way things are actually specified is wrong, right? And what I'm telling you is, if we allow gapped sequence numbers, it will solve so many other problems, including this problem, right? No, you'd still have the same... it would only affect new ones. Yeah, new ones would be fixed, but you wouldn't fix old ones. We don't know that there are any smart contracts in use. You can't say that; we don't know. Sure, but let's let people complain about a problem before we
+Fix it. Well, the contract we're describing here is, I think, the most common example of smart contracts: you branch on time; you want one branch before some time and a different branch after. Like, the 101. I would
+
+[58:00] Argue the number one smart contract people want is the cross-chain swap, right? Well, yeah, you bring up a super good point here, which is, anything that we're proposing as a solution here, we'd better test it to make sure that it's Pareto-better, right? I'm not happy with a solution that fixes this problem but makes cross-chain swaps worse. So, I don't know, I think what we should do is the usual thing, right? We're going to have a conversation on the dev mailing list for this one, and, David, if you can show us how this breaks atomic cross-chain swaps, I'm happy to fix things rather than break things. So, Jonathan, John, if you have another idea for how to do gapped sequence numbers besides the signed-operations one... I
+
+[59:00] Had that one in for the signed-operations proposal, but, yeah, basically something simpler. Like, if we have something that I can argue fixes this problem but also fixes other problems, I'd rather spend my effort arguing in favor of that. We'd still have the same problem; that's the problem with any other solution: we would keep the close time broken in some respects. If we have gapped sequence numbers, then there's no issue, because, so, right now the spec says that if you have bad time bounds, the transaction is invalid; that's what the spec says, and we're actually not doing that. It feels like the right place to end up, to me. I think we should, yeah. Alright, everyone, look out for a thread on issue 622. Thanks, everybody, for watching. That's the end of this week's Open Protocol Meeting. See you in two weeks.
+
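+The gapped-sequence-number trick mentioned near the end can already be approximated today with a bump sequence operation. A hedged sketch with placeholder keys and sequence numbers (this is one reading of the idea, not an official pattern from the meeting):
+
+```ts
+import { Account, Asset, BASE_FEE, Keypair, Networks, Operation, TransactionBuilder } from "stellar-sdk";
+
+const source = Keypair.random();                         // hypothetical contract account
+const destination = Keypair.random().publicKey();        // hypothetical counterparty
+const account = new Account(source.publicKey(), "100");  // current sequence assumed to be 100
+
+// This transaction consumes sequence 101. On success, the bump moves the
+// account's sequence to 110, leaving a gap; on failure (sequence consumed
+// but operations not applied), the sequence stops at 101, so anyone can
+// later tell whether this branch actually executed.
+const tx = new TransactionBuilder(account, {
+  fee: BASE_FEE,
+  networkPassphrase: Networks.TESTNET,
+})
+  .addOperation(Operation.payment({ destination, asset: Asset.native(), amount: "10" }))
+  .addOperation(Operation.bumpSequence({ bumpTo: "110" }))
+  .setTimeout(0)
+  .build();
+```
+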
diff --git a/meetings/2020-07-10.mdx b/meetings/2020-07-10.mdx
new file mode 100644
index 0000000000..d0041066b3
--- /dev/null
+++ b/meetings/2020-07-10.mdx
@@ -0,0 +1,162 @@
+---
+title: "Turing Complete Contract Proposal for Stellar"
+description: "A community proposal exploring decentralized, Turing-complete transaction creation on Stellar through programmable signing servers, enabling trust-minimized smart contract workflows without altering core consensus."
+authors: [kalepail]
+tags: [tutorial]
+---
+
+import YouTube from "@site/src/components/YouTube";
+
+
+
+This community session introduces a draft Stellar Ecosystem Proposal (SEP) aimed at bringing Turing-complete smart contract capabilities to Stellar without embedding a virtual machine into the protocol itself. Rather than executing arbitrary code on-chain, the proposal focuses on decentralized transaction creation using external but standardized services called Turing Signing Servers. These servers generate partially signed transactions according to programmable contract logic, leaving final execution to Stellar’s existing consensus layer.
+
+The core motivation is functional feature parity with Ethereum in the financial domain, while preserving Stellar’s strengths: predictable execution, asset-first design, and strong safety guarantees. By separating transaction creation from execution, the model allows complex business logic, trust minimization, and programmability without exposing accounts to arbitrary execution risk. Contracts define what may happen, while users retain final control over whether transactions are submitted.
+
+### Key Topics
+
+- Motivation and framing:
+  - Distinction between functional feature parity and protocol-level feature parity with Ethereum.
+  - Emphasis on finance-native use cases where Stellar already excels.
+- Transaction creation vs execution:
+  - Stellar already provides decentralized execution but relies on off-chain transaction creation.
+  - The proposal decentralizes creation using programmable, deterministic contract logic.
+- Turing Signing Servers:
+  - External services that host contracts and generate partially signed transactions.
+  - Incentivized via fees, disinterested in outcomes, and designed to be run by many independent operators.
+- Contract architecture:
+  - Three roles: users, contract creators, and Turing Signing Servers.
+  - Contracts are uploaded once and replicated across multiple servers for redundancy and trust minimization.
+- Multi-signature and thresholds:
+  - Contracts rely on Stellar’s native multisig and thresholds to prevent unilateral control.
+  - No single server can act alone; collusion is mitigated by requiring matching signatures.
+- Safety model:
+  - Transactions generated by contracts are never executed automatically.
+  - Turing Signing Servers only create and partially sign transactions according to predefined logic.
+  - Users retain final authority to inspect, sign, or reject any transaction before submission.
+- Demonstrated examples:
+  - Vending machine swaps (asset-for-asset exchanges).
+  - Oracle-style contracts using external APIs (e.g., weather-based payouts).
+  - Account-level filters limiting payment size.
+  - Wager pools and donation pools with automated payout logic.
+  - Recurring revenue and subscription-style payments without pre-signed transactions.
+- Ecosystem implications:
+  - Reduced trust in centralized service operators.
+  - Potential applications in DeFi, anchors, liquidity provision, and subscriptions.
+ - Compatibility with existing Stellar tooling and protocol guarantees. + +### Resources + +- [Stellar Ecosystem Proposals](https://github.com/stellar/stellar-protocol) +- [SEP-30: Account Recovery](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0030.md) + +
+ Video Transcript + +[00:00] All righty. Welcome to another episode of engineering talks. My name is Tyler van der Hoeven, and I'm going to be talking today about a pre-SEP proposal for Turing-complete smart contracts for Stellar. Pre-SEP simply means that this is just some work that I've been doing on a Stellar Ecosystem Proposal around this idea of Turing-complete contracts, and we're going to talk a lot about that today. Obviously, that's what the talk is all about, but it's important to reset your thinking a little bit. We all have an idea of what "Turing complete" or "smart contracts" means; try to come into this without too much baggage, and I'll explain what I mean along the way. I'm going to be using + +[01:00] Ethereum as our comparison project, because most of us, when we think of smart contracts, have Ethereum in mind. This is not a competition against Ethereum. It's simply a comparison of features: if this SEP were accepted and used, how would you accomplish on Stellar the same sorts of things you can on Ethereum, comparing the functionality and features that Ethereum offers with what this Turing signing server SEP would allow you to accomplish on Stellar. So let's dive in. Today's going to be a bit technical. We're going to blaze through these slides pretty quickly and then get into some examples and code, so it will probably run a little longer than we're used to for these engineering talks. Stick with me; it is quite interesting, and I'm super excited. I've spent a lot of time thinking about and working on this problem myself, as something that I very much need in my own development, + +[02:00] And I think it would really benefit the Stellar community at large. All right, so the problem: Ethereum has market dominance in an arena we're better suited to serve, namely finance. We have assets, and the ability to have assets as first-class citizens that live right in an account. But oftentimes financial architecture gets built on Ethereum in instances where it would be better built on Stellar: it's going to be faster and cheaper, and capabilities that you'd have to custom build yourself on Ethereum are often built in at the base layer on Stellar. So our solution is to appeal to Ethereum developers by bringing functional feature parity, and that's a key phrase here: functional feature parity. This isn't feature parity; it's functional. What you can accomplish on Ethereum, we're trying to allow that same type of + +[03:00] Functionality on Stellar, in the financial arena. And so the solution, the product, for me is smart contracts. Now, we have a concept of smart contracts on Stellar. Using operations and the nature of Stellar transactions, you can accomplish a lot of different things atomically inside a transaction. You can have lots of different operations that open things up and close things off, all within the same transaction. You don't have to do a bunch of individual things and then wonder what happens in between, because lots of things can happen in a single ledger. You can accomplish a lot of very intelligent, smart things, and by using things like time bounds or pre-signed transactions you can get away with a lot of complex business logic, and we've sort of slapped the smart contracts label onto that.
But that is in no way what most of the world, at least most of the blockchain world, thinks of when they think of smart contracts. The concept is very different from what we tend to + +[04:00] Talk about when we talk about Stellar smart contracts today. So we're going to be moving away from what most of us think of when we think of Stellar smart contracts and moving much closer to what Ethereum means when it says smart contracts. So my definition is a smart contract as the ecosystem understands it. Regardless of what Wikipedia says, the phrase "smart contract," as the blockchain ecosystem at large uses it, means an arbitrary Turing-complete program executed on a decentralized network for the purpose of minimizing trust. So you have this idea of a program with arbitrary programming, that does who knows what, that accomplishes some sort of goal, and that goal is better accomplished when it's trust-minimized, when there isn't a lot of third-party trust involved, when it's decentralized. And this is + +[05:00] Important for all of us to agree on, even if you don't necessarily agree with me: this is where I'm coming from when I talk about the smart contracts that I'm envisioning through this SEP that I'm building, this idea of minimizing trust. There will still be trust involved; there's always trust involved. It's just a question of who, how much, and which parties you're trusting. And then arbitrary Turing-complete programming: the ability to do any sort of logic that you need to create some sort of output. Stellar, by design, is not Turing complete, and that allows it to be very fast and flexible and to scale very quickly, whereas on Ethereum those things are bundled together, which brings us to the idea of transaction creation and execution. So what does that really mean? What is this ecosystem doing with smart contracts? There are two things when it comes to financial smart + +[06:00] Contracts: there's transaction creation and transaction execution. Transaction creation is building blockchain mutations: the instructions for how I want to change the existing state of the ledger or blockchain by adding another block. Transaction execution takes those instructions, those mutations, and actually runs them to perform the changes. So you have the instructions and the execution, the creation and the execution. We have both on Stellar and on Ethereum: you can create transactions and execute transactions on both networks. So what's the problem? Well, the problem is decentralization. On Stellar there is no decentralized transaction creation. On Ethereum there is, because the virtual machines that run Ethereum are operated all over the world, by anybody running an Ethereum node and participating in consensus; the + +[07:00] Transaction creation and execution both happen in that process of arriving at consensus, whereas on Stellar only the execution is decentralized. The creation of the transaction happens outside of the execution: you give very detailed, permissible instructions and then send them to the consensus protocol to actually execute.
On Ethereum, by contrast, you send very minimal information, and then both the creation and execution happen together. Because assets are not first-class citizens there, you can build a whole program that will handle whatever kind of input anybody might send it, and then the ledger for your particular smart contract gets updated; both of those things happen at the same time. That's part of why Ethereum is built the way it is, and some of the ways it differs from something like Stellar, which I'm not going to get into too deeply, and I honestly don't understand it perfectly. But with this SEP proposal that I've been working on, which we're going to look at + +[08:00] Today, we're looking at adding decentralized transaction creation. Smart contracts here do not execute; this is really important. It is very different from Ethereum. All that I'm proposing we add through this SEP is decentralized transaction creation, which we then tack onto the existing architecture we already have within Stellar: decentralized transaction execution. If we add Stellar and this new SEP, which I'm tagging SEP-X, together, we get functional feature parity with Ethereum, where we have decentralized transaction creation and decentralized transaction execution, which would be amazing. Hopefully some of your wheels are starting to turn on what decentralized transaction creation would allow us to do, some of the things it would let us accomplish which we cannot do now, but we'll get into that. So essentially, SEP-X is Turing signing servers: decentralized, Turing-complete + +[09:00] Transaction creation. A few weeks ago we had an engineering talk on SEP-30, which is the key recovery servers, and we have federation servers as well. So we have this concept within Stellar Ecosystem Proposals of external servers or services which run to provide additional logic or additional functionality, common functionality across the ecosystem. This would be an additional server, run by individuals or enterprises, which would allow this decentralized, Turing-complete transaction creation, where you have multiple parties running something which accomplishes some goal in a decentralized way. So Stellar smart contracts via SEP-X have three entities: users, contract creators, and Turing signing servers. The lines between these may be + +[10:00] Blurred at times, but they are distinct in the sense that, regardless of whether they end up being the same entity or individual, they serve three separate purposes. Users submit transactions for final execution; they're the final interested party. They have something that they want to accomplish, and they interface with Turing signing servers, but typically by transacting through a contract, not with the Turing server directly. There are contracts that users interact with, and those contracts are hosted on the Turing servers. So I will interact with a contract which is hosted on multiple Turing servers. And then there are contract creators, obviously, who create the contracts and either provide services or just useful features; we'll get to some of those in a minute. But a contract creator doesn't necessarily have to be some sort of service provider.
They can just build a contract that anyone can implement and + +[11:00] Use to provide some additional decentralized functionality. I have a diagram here which is 100% useless, so we are going to break it down. A user wants to interact with a contract creator for the service they're providing, so we have users and contract creators. We have this concept already today, where there's some third party that you want to interact with from your Stellar address, whether you're making a payment or you want some functionality the service provider is offering, and you have a very one-to-one relationship. You send instructions or requests, you interact with their UI, and they send you back responses, whether that's "sign this transaction" or "add this trustline" or whatever that interaction might be. It's very much one to one, and this is where some of the issues arise. This is unacceptable as a requirement. Oftentimes this will be fine, but I think it's wrong to say this is the way it has to be. It's + +[12:00] Unacceptable as a requirement because it leaves the user trusting a single entity, it leaves the contract creator liable for a high level of responsibility and involvement, and ultimately it's not decentralized. You could add more servers, but the contract creator is still controlling all of those servers. Decentralization, at its core, is mitigating trust: taking that trust and splitting it amongst multiple parties so that no one entity controls everything. We split it up so there's a tolerable level of trust, because no one entity is the single trusted entity. And so, getting back to that diagram I showed a little earlier, we introduce a disinterested but incentivized third-party service, the Turing signing server, which acts as a permission request arbitrator between users and contract creators. To the server we upload a strict, specific, Turing-complete program, the smart contract, which creates transactions. It doesn't execute them; it creates them. So the contract creator builds a program which creates + +[13:00] Outputs. In my case it's going to be a lambda function, but ultimately it's just an API endpoint which, when called, takes an input and then outputs a valid Stellar transaction. The Turing signing server, as you can see here, stores contracts and holds keys, and those keys are Stellar keys which sign for the transactions that the contract generates. The reason this works, and is valuable, is that in Stellar we have the concept of multisig, where multiple signers can be added to an account and can sign for any transaction originating from that account. So if I want to make a payment, I can sign with my key, or I can sign with somebody else's key if they've been added as a signer to my account. And so we create this contract and we + +[14:00] Upload it to a Turing signing server. When we upload it, a signing key is sent back to us, and that key is what we attach as a signer, either to our contract's account, or we give it to our user to add to their account, so that the server can sign for some action from that account. Now, when we have just one Turing signing server, we're going to see that doesn't get us any improvements; it's the multiplicity, lots of Turing signing servers, where the power starts to come out.
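The contract is described above as nothing more than an API endpoint that takes an input and returns a transaction. As a rough sketch of that shape, and only a sketch: the route, request fields, response shape, and in-memory stores below are assumptions for illustration, not the draft SEP's actual API.

```js
// Hypothetical sketch of a Turing signing server endpoint; the route,
// request fields, and response shape are illustrative assumptions.
const express = require("express");
const { Keypair, Transaction, Networks } = require("stellar-sdk");

const app = express();
app.use(express.json());

// Contracts are just functions from an input object to a transaction XDR.
// They are registered in code here; the SEP envisions uploading them.
const contracts = new Map(); // contractId -> async (input) => unsigned XDR
const signingKeys = new Map(); // contractId -> Keypair held by this server

// Run a contract: execute its logic, sign the resulting transaction hash,
// and return the XDR along with this server's detached signature.
app.post("/contract/:id/run", async (req, res) => {
  const contract = contracts.get(req.params.id);
  const keypair = signingKeys.get(req.params.id);
  if (!contract || !keypair) return res.status(404).end();

  const xdr = await contract(req.body); // deterministic for a given input
  const tx = new Transaction(xdr, Networks.TESTNET);
  res.json({
    xdr,
    signer: keypair.publicKey(),
    signature: keypair.sign(tx.hash()).toString("base64"),
  });
});

app.listen(3000);
```

Because the contract is deterministic, every server running the same code with the same input produces the same XDR, which is what makes the multi-server signature matching below possible.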
But this allows transactions and their signatures to be generated following the strictness of a codified contract rather than the trust of a contract creator entity, and this is the big point here. That may seem a little abstract, but it's really powerful, because say I want, under very strict, specific circumstances, to let you use my account to pay + +[15:00] Yourself a hundred dollars if some parameter is met. I can't ever give you my secret key, because if I give you that, you can do much more than just pay yourself a hundred dollars. So how can I give you access to my account without giving you complete access, only strict, specific, Turing-complete access to my account? I get that through this contract creation, where I upload a contract to a Turing signing server and then you interact with that Turing signing server, and rather than having access to the secret seed, you have access only to correctly signed transactions from those Turing signing servers. I don't give you a secret key; I give you signed transactions through these Turing signing servers, and you interact with the Turing signing servers rather than directly with my account. That's just one example, but hopefully it starts to make sense. + +[16:00] The way this becomes decentralized is that we upload these contracts to lots of different Turing signing servers, so it's the same contract across lots of different Turing signing servers, and then we use thresholds. We add the servers' keys as multisig signers on accounts, but each signer has a weight and the account has thresholds. So I can say: to make a payment, you need a threshold of three, meaning I need a total signature weight of at least three to be able to make a payment. And when I upload this same contract to all of these Turing signing servers, they all send me back a key, which I add to my account as a signer, but only with a weight of one, so that no single Turing signing server, not even two Turing signing servers, could sign for or collude to do something with my account; you would need at least three. And you can add any number of signers, up to 20, and through thresholds compose very complex, secure logic to + +[17:00] Account for whatever kind of logic you're trying to perform with your contract. Essentially, you're just going to upload this to a number of Turing signing servers and then, through thresholds, decide how many of those Turing signing servers need to add their signature to the transaction that's being requested. And because it's the same contract, it's producing the same transaction, the same XDR, and if any one of these servers gets a little sneaky and tries to, you know, make a payment to itself, it would change the XDR, and therefore the signatures from all the other Turing signing servers would no longer match. If one server's response differs from all the others, it gets rejected, spat out, because it doesn't match, and its signature would be invalid when it finally comes to submission time. And so now the contract creator is a nonentity; users no longer really interact with them. The user interacts directly with the Turing signing servers. So contract + +[18:00] Creators don't even need to run a server at this point. They just upload some arbitrary JavaScript logic, or really logic in any programming language, to these Turing signing servers.
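Here's what that signer-and-threshold setup might look like with the JavaScript stellar-sdk; a minimal sketch, assuming three servers have already returned their keys on upload (the secret seed and the three public keys are placeholders):

```js
// Sketch: add three Turing signing server keys as weight-1 signers on the
// contract account and require a combined weight of 3 for payments.
// All keys here are placeholders.
const {
  Server, TransactionBuilder, Operation, Networks, Keypair, BASE_FEE,
} = require("stellar-sdk");

const server = new Server("https://horizon-testnet.stellar.org");
const contractKeypair = Keypair.fromSecret("S...CONTRACT_SECRET");
const tssSigners = ["GA...TSS1", "GB...TSS2", "GC...TSS3"]; // returned on upload

async function addTssSigners() {
  const account = await server.loadAccount(contractKeypair.publicKey());
  const builder = new TransactionBuilder(account, {
    fee: BASE_FEE,
    networkPassphrase: Networks.TESTNET,
  });

  // Weight 1 each: no single server (or even two) can act alone.
  for (const key of tssSigners) {
    builder.addOperation(
      Operation.setOptions({ signer: { ed25519PublicKey: key, weight: 1 } })
    );
  }

  // With all thresholds at 3, even the master key (weight 1) can no longer
  // act unilaterally; the demo also adds a hash signer as an escape hatch.
  builder.addOperation(
    Operation.setOptions({ lowThreshold: 3, medThreshold: 3, highThreshold: 3 })
  );

  const tx = builder.setTimeout(60).build();
  tx.sign(contractKeypair);
  return server.submitTransaction(tx);
}
```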
And so long as the contract is valid, producing good transactions, being used, and not being nefarious, the user can interact with Turing signing servers completely separately from the contract creator, in a decentralized manner, getting transactions which follow the pattern that the contract creator allowed for, because the servers add their signatures. In this case, following our example from before, the user can begin making payments to themselves for 100 dollars, so long as each payment follows the pattern the contract creator allowed for. So hopefully it's starting to make sense: Turing signing servers allow for secure, decentralized transaction creation and signing through programmable contracts. Again, we need to remember that the goal of Turing signing servers is to create transactions, not to submit + +[19:00] Them. They perform the business of creating a valid, signed transaction, and that signing doesn't have to be, and often is not, complete. It's partial signing: signing for some of the logic of the transaction, while there's probably some piece that's left to the user, whether that's paying the base fee, or, in a vending machine example, paying a hundred lumens to get a hundred of some asset back, like an ICO model or some sort of token giveaway. There are lots of instances where the user is going to be the final signer. The transaction that comes back is signed, and it's signed enough for all the interaction that deals with the contract creator's account, but there's still some piece that's left for the user's account. So it's just adding signatures to a transaction, not necessarily fully signing it, although it obviously could be. Again, the business + +[20:00] Logic lives in the contract, not in the XDR itself, not in the Stellar transaction itself, so you can create all kinds of business logic that produces a transaction, which at the end of the day could be any sort of thing, and that's what we're going to look through now. So again, the motto for this is: you get what you permit. When you start asking, could I do this, or could this be used in that case, the answer is going to be yes, so long as it's related to Stellar transactions and granting access to your account through a secure contract; so long as you create the right permissions, you can allow for it, you can build something like that, which is amazing. Now, how does this compare to Ethereum smart contracts? We'll look through this really quickly. With this SEP-X, we would have, as Ethereum does, both decentralized transaction creation and + +[21:00] Decentralized transaction execution. The difference, arguably, is going to be the safety over liveness that we have with the Stellar Consensus Protocol versus Ethereum's virtual machines. The safety aspect of the contract is that it's just creating transactions, not actually executing them, so you don't have to worry about the safety of your account, so long as you've built your contract correctly.
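To make that "user signs last" flow concrete, here is a minimal sketch of the user's side, assuming a partially signed envelope `xdr` has already come back from the servers (the secret key is a placeholder):

```js
// Sketch: the user inspects a partially signed transaction, adds the final
// signature, and submits it. The XDR and secret key are placeholders.
const { Server, Transaction, Networks, Keypair } = require("stellar-sdk");

const server = new Server("https://horizon-testnet.stellar.org");
const userKeypair = Keypair.fromSecret("S...USER_SECRET");

async function signAndSubmit(xdr) {
  const tx = new Transaction(xdr, Networks.TESTNET);

  // The user has the final say: inspect the operations before signing.
  for (const op of tx.operations) {
    console.log(op.type, op.amount || "", op.destination || "");
  }

  tx.sign(userKeypair); // completes the signature threshold
  return server.submitTransaction(tx);
}
```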
And then the liveness aspect comes down to how well you wrote your contract, whether your account is funded, all the things that are already constraints within Stellar, versus the Ethereum model, where once you upload a contract there's nothing you can do to really alter it, because both transaction creation and execution are happening at the same time. That contract will always exist, it will always be up, unless you've left one of those non-decentralized back doors open. So some of the core philosophical design + +[22:00] Differences: with Stellar smart contracts, nothing unacceptable can happen; with Ethereum smart contracts, nothing unexpected can happen. There are benefits on both sides; it really depends on what you're trying to build. With a Stellar smart contract, because the contract is just generating a transaction, the final execution is in the hands of whoever the final user is, whereas with Ethereum smart contracts, the final execution is in the hands of the contract. You execute an Ethereum contract hoping that it does what you expect, whereas a Stellar contract spits out a final transaction, and you have the final say over whether or not you want to submit it. So, finally, the thing that we were all looking forward to: examples. And I think, yes, that is pretty much my last slide. I'm going to switch now to my browser. I've got some Glitch demos here, Glitch being a fantastic little service, and I'm going to open up all of these examples and make this a + +[23:00] Little bit bigger. The first one I'd like to look at is a very basic, run-of-the-mill vending machine example. When we talk about smart contracts, vending machines are a really good starting point: you give them an input and they give a guaranteed output. In this case we're running a Tyler coin trade, or swap, where you send in a hundred lumens and you get a hundred Tyler coin back. It's always one to one, so if you sent 50 it'd be 50, and if you sent 10 it'd be 10. So I've got an account here which I am making a payment to, and this is just a standard Stellar account that I've funded with Friendbot; nothing fancy has been added to it. There's a contract, though, that I've uploaded, that holds all the logic: + +[24:00] When a request comes in to this contract, make an operation taking a hundred lumens from the source account, then send a hundred Tyler coin back, one to one, and then generate the XDR. So let me pop into the code for this real quick and make it a little bigger. This is our vending machine smart contract. Essentially, the contract says: when a request comes in with a source and an amount, we're going to build a Stellar transaction and spit out the XDR, and that transaction is a payment of 100 lumens to me, plus sending Tyler coin back to the source account, which is the account that we add right here. And then it also adds the fee payments to the + +[25:00] Turing signing servers. Again, I mentioned that they are disinterested but incentivized third parties: they have no benefit to gain from collusion, but they do benefit from collecting fees.
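The actual demo's code isn't reproduced here, but a minimal sketch of the contract shape just described might look like the following; the contract account, asset issuer, and fee addresses are hypothetical placeholders:

```js
// Sketch of a vending-machine contract: swap incoming XLM one-to-one for
// TYLER. All addresses are placeholders; the demo's real code differs.
const {
  Server, TransactionBuilder, Operation, Asset, Networks, BASE_FEE,
} = require("stellar-sdk");

const server = new Server("https://horizon-testnet.stellar.org");
const CONTRACT_ACCOUNT = "GC...VENDING_MACHINE"; // receives the XLM
const TYLER = new Asset("TYLER", "GC...VENDING_MACHINE"); // issued asset
const TSS_FEE_ACCOUNTS = ["GA...TSS1", "GB...TSS2", "GC...TSS3"];

// The contract: input in, unsigned transaction XDR out.
async function contract({ source, amount }) {
  const account = await server.loadAccount(source);
  const builder = new TransactionBuilder(account, {
    fee: BASE_FEE,
    networkPassphrase: Networks.TESTNET,
  })
    // The user pays XLM into the vending machine...
    .addOperation(
      Operation.payment({
        destination: CONTRACT_ACCOUNT,
        asset: Asset.native(),
        amount,
      })
    )
    // ...and the contract account pays TYLER back, one to one.
    .addOperation(
      Operation.payment({
        source: CONTRACT_ACCOUNT,
        destination: source,
        asset: TYLER,
        amount,
      })
    );

  // Small fee payments incentivize the Turing signing servers.
  for (const feeAccount of TSS_FEE_ACCOUNTS) {
    builder.addOperation(
      Operation.payment({
        source: CONTRACT_ACCOUNT,
        destination: feeAccount,
        asset: Asset.native(),
        amount: "0.1",
      })
    );
  }

  return builder.setTimeout(300).build().toXDR();
}
```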
So if you run a Turing signing server, you can charge fees to execute these contracts, as well as to upload contracts. Turing signing servers have a business opportunity, but they're disinterested in the sense that they don't care what contracts are being run. So this is, I mean, 57 lines of code plus comments, a very small little contract, but it does allow arbitrary logic, and it allows me to safely run an entire contract paying out my Tyler coin in exchange for XLM, all through this very basic JavaScript code. When I upload this contract to a Turing signing server, it sends back a signing key, because this is + +[26:00] The account, the contract address, that is going to be paying out Tyler coin. If we look this up in StellarExpert on the testnet, we can see the signers. When we upload the contract to the Turing signing servers, they send back signing keys, and those signing keys are what we add to this contract account to allow these contracts to actually generate signed transactions, because each Turing signing server generates the XDR and then adds a signature using whichever signing key it holds for that contract. And then we have a threshold of three, like I mentioned before. So long as three addresses sign off on any transaction coming from this contract, we have a valid set of signatures, and the execution of the transaction will go through, in this case paying out Tyler coin. So let's go back here and generate that XDR. The generate + +[27:00] XDR button is going to call the Turing signing servers. It's going to say: hey, this account would like to generate an XDR. And it looks like we have got a beautiful error. So, you know, that's the way it goes. But essentially, what would have happened if it had worked is: the request goes out to all of those different Turing signing servers, each sends back the same XDR with its signature, the signatures get added to the transaction, we add our final signature, and then we can submit it. We have this account, so we would have to sign for that hundred-lumen payment; it pays out the hundred lumens, and the signatures that we received from the Turing signing servers for the Tyler coin side are all valid. All the signatures are there, and we can make this relatively complex payment through a very simple Turing signing server smart + +[28:00] Contract. So that's our vending machine example. The next one showcases what I would call oracle-ized data, but really it's just taking a third-party API and using that information to generate a different, variable transaction. In this case we are using the Dark Sky API. We look up the weather at my current location, and if it's raining, we generate a transaction that pays out rain coin, and if it's sunny outside, we generate a transaction that pays out sun coin. Again, the same process: when I upload this contract, each Turing signing server sends back a new signing key, we add those signing keys to the contract account, and then the contract, when we call it, will generate signatures for this particular transaction. So hopefully this + +[29:00] One will work.
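The fan-out just described, in which every server must return the identical XDR, could be sketched from the client's side like this; the server URLs and response shape are assumptions carried over from the endpoint sketch above:

```js
// Sketch: ask several Turing signing servers to run the same contract with
// the same input, check they all built the identical transaction, and merge
// their signatures. URLs and the response shape are assumptions.
const { Transaction, Networks } = require("stellar-sdk");
const fetch = require("node-fetch");

const TSS_URLS = [
  "https://tss-1.example.com",
  "https://tss-2.example.com",
  "https://tss-3.example.com",
];

async function collectSignatures(contractId, input) {
  const responses = await Promise.all(
    TSS_URLS.map((url) =>
      fetch(`${url}/contract/${contractId}/run`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(input),
      }).then((res) => res.json()) // assumed: { xdr, signer, signature }
    )
  );

  // Every server must have produced the identical XDR; a server that
  // deviated from the contract would produce a mismatched envelope.
  const xdr = responses[0].xdr;
  if (!responses.every((r) => r.xdr === xdr)) {
    throw new Error("Turing signing servers disagree on the transaction");
  }

  // Attach each server's signature; addSignature verifies it against the
  // transaction hash before accepting it.
  const tx = new Transaction(xdr, Networks.TESTNET);
  for (const { signer, signature } of responses) {
    tx.addSignature(signer, signature);
  }
  return tx.toXDR();
}
```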
It's not raining at my house at the moment, so unfortunately we can't get any rain coin. In this case the contract looks up the weather and says: because it's not raining, you're not going to get any rain coin. If it were raining, you'd be able to get rain coin, but because it's not, there's logic running in the Glitch app which says: if it's not raining, don't generate the XDR. So maybe a little bit of a letdown, but still, you can provide arbitrary logic that says: I'm going to give away usage of my account, but only under certain parameters. And in this case it's an arbitrary third-party API providing the information; it says it's not raining, and so we don't generate a signature, we throw the request out, which is cool. The next one is the filter + +[30:00] Smart contract, and this one's actually a little bit different, because instead of adding signers to a contract address, we're adding signers to a user's account. This is one of those examples where, unlike the ones we've looked at so far, which have been third-party services, somebody actually trying to run a business off smart contracts, here somebody has just created a random, open-source smart contract which provides filtering functionality. So maybe I have an account that I want to give someone access to, but only if their XLM payments are below a hundred lumens. I've made a filter smart contract here which basically says: when a request comes in, and the input here is actually an XDR, check that it's a Stellar transaction which has just one operation, which is + +[31:00] A payment, and that it's for an amount less than 100, because if it's greater than 100 lumens, the request will be rejected. In this case we want to attach this contract to a user's account, not to a contract account, so there isn't really a contract address here. Somebody's just created this contract and said: hey, I'm going to upload this, and if you would like this filtering functionality for your account, you just need to add the signing keys to your address. So if we take this XDR here, which is a valid example, we can look up the account that it's attached to. We've added the signing keys to this account, and right now there are just two: we've uploaded this contract to two Turing signing servers, taken their signing keys, and attached them to our account. We + +[32:00] Have a threshold of 3, which means we need all three signers, both Turing signing servers plus our own signing key, to sign off. And then we have this escape-hatch SHA-256 hash signer, so that if something terrible happens and we need to rescue this account, we have that in our back pocket. That's not a Turing signing server thing; that's just good practice if you've locked your account down like this. But anyway, the Turing signing servers can sign for a combined weight of two. By themselves, these Turing signing servers can't do anything; there's no collusion possibility, because at most they would only have a weight of two, which isn't enough to do anything. They would need that final signer, my signature, to actually do anything.
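A minimal sketch of the described filter check might look like this (the rejection messages and exact rules are illustrative; the demo's real contract may differ):

```js
// Sketch of the filter contract: accept an incoming transaction XDR only
// if it contains a single native-asset payment of less than 100 XLM.
const { Transaction, Networks } = require("stellar-sdk");

function filterContract(xdr) {
  const tx = new Transaction(xdr, Networks.TESTNET);

  if (tx.operations.length !== 1) {
    throw new Error("Exactly one operation is allowed");
  }

  const op = tx.operations[0];
  if (op.type !== "payment" || !op.asset.isNative()) {
    throw new Error("Only native XLM payments are allowed");
  }
  if (parseFloat(op.amount) >= 100) {
    throw new Error("Payments must be under 100 XLM");
  }

  // Passed the filter: the server would sign and return the envelope here.
  return tx;
}
```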
However, I also can't do anything by myself. This account is useless on its own; it needs those Turing signing servers to actually accomplish anything, and those Turing signing servers are locked into signing only when the condition holds: that the + +[33:00] Transaction is for less than 100 lumens. So if we go back here, we can see this payment is for 100 XLM. We generate the XDR on this, and, none of my examples are cooperating today, we try the invalid one, and it does filter the transaction here. Looks like this one's not fully working either; I think I may have uploaded the beta version. Let me look real quick at the contract address and make sure I'm actually using the right contracts, because this one is kind of cool. Let me check the project's JavaScript real quick. It's got the right one. Yeah, who knows; + +[34:00] I probably shouldn't have fiddled with it so much right before the demo. But anyway, if you had a valid example, and this is a valid XDR, though something's up with my Turing signing servers, it would be signed, you could view it in the Laboratory, and then you could submit the transaction to the network, so long as it was under that hundred. One more example here on Glitch is our hourly wager demo. I play video games occasionally, and on Fridays we'll do wager pools: maybe, you know, 10 lumens or 20 bucks or something like that, and it all gets pooled together, and whoever wins the game gets the pool. You could also see it as a donation pool, or even a Kickstarter-type thing, where if the + +[35:00] Balance gets over a certain threshold, the money's released. There's lots of different smart contract logic that comes out of these types of functionalities. But you don't want a single entity holding all those funds, with the risk that they could run away with them, so you might add multiple human signers. But if one of them gets bitter and doesn't want to follow through, you know, "I say you cheated, and I'm not going to sign off on this transaction," you can run into issues when you have those human variables. If it's just machines following contract logic, you remove that layer of risk, and you get the trust-minimized aspect of things, which is really what we're after and what Turing signing servers exist for. In this case, you have a contract address where the contract has been uploaded to all of the Turing signing servers, with a threshold of three again, so you need at least three of these Turing signing servers. Individuals start to pay this account; you can see the payments coming into the wager pool. And then, in + +[36:00] This case, every hour the contract will let you generate an XDR which picks one of these individuals at random as the winner and pays out the pool to them. If we look at the wager contract, it's a little bit longer, but still not even 100 lines: it takes the operations on this contract account, looks at the payments, and then randomly selects one of those senders as the winner every hour. So those are the examples; in that case, again, you upload the signers to the contract, like we did before. The last one that I think is really interesting is a recurring revenue model.
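Before moving on, here is a rough sketch of what that hourly payout logic could look like; the pool address is a placeholder, and the randomness caveat in the comments is worth noting:

```js
// Sketch of the wager-pool payout: look at recent payments into the pool
// account, pick a random payer as the winner, and pay them the pool.
const {
  Server, TransactionBuilder, Operation, Asset, Networks, BASE_FEE,
} = require("stellar-sdk");

const server = new Server("https://horizon-testnet.stellar.org");
const POOL_ACCOUNT = "GC...WAGER_POOL"; // hypothetical pool address

async function wagerContract() {
  // Collect the senders who paid into the pool this round.
  const { records } = await server
    .payments()
    .forAccount(POOL_ACCOUNT)
    .order("desc")
    .limit(50)
    .call();
  const players = records
    .filter((p) => p.type === "payment" && p.to === POOL_ACCOUNT)
    .map((p) => p.from);
  if (players.length === 0) throw new Error("No entries this round");

  // NOTE: Math.random() is fine for a sketch, but every Turing signing
  // server must derive the same winner, so a real contract would need a
  // deterministic seed (e.g. a recent ledger hash) instead.
  const winner = players[Math.floor(Math.random() * players.length)];

  const pool = await server.loadAccount(POOL_ACCOUNT);
  const xlm = pool.balances.find((b) => b.asset_type === "native");

  const tx = new TransactionBuilder(pool, {
    fee: BASE_FEE,
    networkPassphrase: Networks.TESTNET,
  })
    .addOperation(
      Operation.payment({
        destination: winner,
        asset: Asset.native(),
        // Keep a little XLM behind for the reserve and fees.
        amount: (parseFloat(xlm.balance) - 2).toFixed(7),
      })
    )
    .setTimeout(300)
    .build();

  return tx.toXDR();
}
```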
So one of the issues right now with recurring revenue models on Stellar is that you have to pre-sign loads of transactions and bump sequence numbers, and it gets quite hairy when it actually comes time + +[37:00] To execute them. You have to pre-sign loads of transactions, especially for shorter terms on a weekly or monthly basis, and you have all these transactions just sitting around, hoping that they'll still be valid whenever it comes time to submit them. With Turing signing servers, you can create a contract which takes into account when a payment was last made and for how much, all the information around the actual payment, and then, if it's time, if it's been a month since the last payment, it can make another payment. Essentially, much like the filter contract, you upload this contract and add the signers to a user account, but those signers can only sign through this contract. So if it's been a month since the last payment was made, you can call this contract and it will generate a transaction which makes a thousand-lumen payment. But as soon as you submit + +[38:00] That to the network, if you try to call the contract again and say, hey, I just want to keep collecting thousands of lumens from this account, it's going to say: hey, it hasn't been a month since the last time we ran this contract. So you don't have to have any pre-signed transactions. You simply wait until the time comes, generate the transaction, and you can actually generate and submit it through some sort of third-party service. The user wouldn't necessarily ever have to know, except that they still have ownership over their account, and if at any point in the future they want to cancel their subscription, they simply remove the Turing signing server signers from their account, and any calls to that contract to make a payment will fail, because the user removed the signers. So I think, if this account still exists, we can actually look at + +[39:00] The signers that have been added to it. Yeah, so we've got the two signers here, and it's also got a threshold of two. So while we need both Turing signing servers to generate that recurring payment every month, if at any point the user wants to remove those signers, they're able to do that, because they have a weight of two set on their operation thresholds. So that's been a basic overview of what I have so far on Turing signing servers. It's still a work in progress. Obviously, if this seems interesting to you, and hopefully it does, I am looking for more people to get involved in actually building out smart contracts. I have quite a few test Turing signing servers set up, ready to be used, ready for people to poke around and see what's possible. Hopefully some + +[40:00] Of these examples have gotten your wheels turning. Now is kind of the announcement: we need some people to start using this thing, to start poking at it, to see if this makes sense as a SEP, to see if we built the right thing, so we can get a better idea of what this should be, how it works and doesn't work, and what kinds of contracts can be built with it.
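The month-elapsed check at the heart of that subscription contract might be sketched like this; the merchant address and the 1,000 XLM amount are placeholders:

```js
// Sketch of the recurring-payment check: only build a new payment if at
// least a month has passed since the subscriber last paid the merchant.
const {
  Server, TransactionBuilder, Operation, Asset, Networks, BASE_FEE,
} = require("stellar-sdk");

const server = new Server("https://horizon-testnet.stellar.org");
const MERCHANT = "GC...MERCHANT"; // hypothetical merchant address
const ONE_MONTH_MS = 30 * 24 * 60 * 60 * 1000;

async function subscriptionContract({ subscriber }) {
  // Find the subscriber's most recent payment to the merchant.
  const { records } = await server
    .payments()
    .forAccount(subscriber)
    .order("desc")
    .limit(100)
    .call();
  const lastPayment = records.find(
    (p) => p.type === "payment" && p.from === subscriber && p.to === MERCHANT
  );

  if (lastPayment &&
      Date.now() - Date.parse(lastPayment.created_at) < ONE_MONTH_MS) {
    throw new Error("It has not been a month since the last payment");
  }

  const account = await server.loadAccount(subscriber);
  const tx = new TransactionBuilder(account, {
    fee: BASE_FEE,
    networkPassphrase: Networks.TESTNET,
  })
    .addOperation(
      Operation.payment({
        destination: MERCHANT,
        asset: Asset.native(),
        amount: "1000",
      })
    )
    .setTimeout(300)
    .build();

  return tx.toXDR();
}
```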
Then we'll move into the actual SEP draft portion, where we write it all up: the API endpoints, how to build one of these servers and run it successfully, the fees around it and how to generate revenue, how to run a secure Turing signing server, and also how to create good contracts. And then hopefully we can get it to an active state, where it starts to be used more and becomes a normal, actively used Stellar Ecosystem Proposal within the ecosystem, where these Turing + +[41:00] Signing servers become commonplace, where anybody can spin up a contract and upload it to a plethora of Turing signing servers. Beyond that, it's just a matter of acceptance, and it becomes a normal part of Stellar development. All right, so that was long. Apologies for that, and apologies that the demos didn't work out great. Hopefully we'll have those back up and running so that everybody can play around with what we have so far; we should have that within the next couple of days, linked on the YouTube video. Let's go to questions. I hope there are a couple of questions you guys have for me; as things pop up, I'll answer them, and then we'll close out. As you have questions, feel free to type them into the chat and we'll walk through them. Otherwise, you can find me on Twitter and Keybase at tyvdh; I would love to chat. All right: how is this relevant for cross-border payments? Where does this fit in decentralized finance? Well, you kind of + +[42:00] Said it there. With decentralized finance, a lot of it is going to boil down to what you mean by decentralized. Everybody has their own idea of what decentralized means, and when it comes to things like decentralized cross-border payments or decentralized finance, it comes down to who's in charge, who's in control, and what layers of ownership you have. In the case of an anchor, for example, wanting to facilitate payments in and out of the ecosystem, or putting up offers on the order book to maintain liquidity between assets, right now all of that is done in a completely centralized way, where an entity controls its order books, and so long as those orders are out there, the entity is free to take them down. When you have these cross-border payments moving back and forth, that can become an issue if a large portion of the success depends on one entity maintaining liquidity. So it + +[43:00] Would be possible for a liquidity provider to create a smart contract that creates order books that are decentralized, where the provider doesn't have the ability to alter or remove the orders; it just provides the liquidity, without the capability to remove itself. So a lot of it has to do, I think, with removing barriers to trust. And this is actually kind of interesting, because that's one of the biggest barriers when doing cross-border payments: do I trust them? Are they going to do what I expect them to do? By using Turing signing servers, really by adding decentralization or trust minimization into your workflow, into your business logic, you're able to guarantee some level of trust, because you relinquish some portion of control, whether that's liquidity provision or making good on payments.
The more you can guarantee through math that something is going to happen, versus guaranteeing it through goodwill, "I have a good business plan, I've done this in the past," the better off you're going to be when it comes to winning people's trust. It's going + +[44:00] To become more and more true as businesses do that and say: you don't have to trust us; you verify through the code that we've written, code that we don't ultimately control, because we've relinquished control of our secret key, our ability to alter the state of the ledger, to these Turing signing servers, which ultimately run the contract as users interact with it. So in that sense, depending on how it's implemented, and at what level it makes sense for a company to add trust minimization to its business logic, Turing signing servers will be useful for cross-border payments and decentralized finance. But a lot of it, again, ultimately boils down to what decentralization means and whether it's actually helpful when it comes to trust minimization or making payments across borders. Is SDF going to run a Turing signing server? Who runs Turing signing servers without any incentive? I don't think SDF is going to run a Turing signing server; I run a couple right now, just as test + +[45:00] Demos. But yeah, there is plenty of incentive to run a Turing signing server. We didn't talk a whole lot about execution fees or contract upload fees, but that's a big part of running a Turing signing server; what it's like to run one would be kind of a separate talk, but you can charge XLM fees. Basically, when a Turing signing server spits out a transaction, part of that transaction is payments to the Turing signing servers: small fees paid out to the servers, as well as fees for actually uploading a contract. It might be ten or a hundred lumens, or five USD, to upload a contract to a Turing signing server. So there's both the incentive for people to upload contracts and the incentive to run good Turing signing servers, so that you can continue to collect fees as your Turing signing servers execute smart contracts. "I don't understand why you need + +[46:00] To share signed transactions with another party." Let me read the full question. "I really don't understand why you need Turing signing servers to share signed transactions with another party. When you share your signed transaction with a Turing signing server, the owner of the Turing signing server can execute your signed transaction to attack you." Again, that's only if you allow that; it depends on how you write your contract. Some contracts may accept an incoming XDR, but it doesn't have to be a signed transaction that's coming in; it's just instructions submitted to a Turing signing server, with no signatures on it yet. And if you write your contract in such a way that the output is a partially signed transaction, not fully signed, where you still need to sign for the source account, where you still need to sign for those fees maybe, where there's still another signature needed, then the + +[47:00] Transaction is only partially signed. It's not yet viable to submit to the network. Yeah, a lot of it is going to boil down to trusting all of the Turing signing servers together.
But you already accept that kind of trust in the way the Stellar Consensus Protocol works, where you trust the individual nodes that are running Stellar. This idea of disinterested but incentivized third parties is quite strong, and it has been proven to work quite well. So ultimately it boils down, again, to: you get what you permit. If you allow for the generation of a transaction that is completely signed and valid, then yes, the Turing signing server could submit it. But even that isn't necessarily an issue, unless they're submitting something that you don't want them to submit; it ultimately depends on what that transaction accomplishes. And because you're splitting up your signing authority between lots of different Turing signing servers, any single one shouldn't, unless + +[48:00] You've allowed for it, be able to do anything on its own. They would need to collude with other Turing signing servers to actually accomplish anything dangerous. "What's the difference between sharing your signed transactions and contracts with a Turing signing server versus traditional servers?" The only difference is that they are traditional servers, just operating as Turing signing servers. Ultimately it boils down to who owns the Turing signing servers, because if you own them, there's no decentralization there; but if other entities own and run them, then, so long as you're uploading to multiple of them, you're running something similar to how Stellar already works, where SDF and Lobstr and Coinqvest all run validator nodes, and you submit a transaction to the network and they validate the transactions and perform the operations + +[49:00] Within them. This is kind of the same thing, where signing authority is split up amongst lots of different servers, and those servers add their signatures to any transactions coming from the contract, and so long as everything is valid and identical, with valid signatures, it can be submitted to the Stellar network. "How would you see this used for subscriptions, like you mentioned before? Also, does this support more than just JavaScript?" So I gave an example of the subscription model. Ultimately, you would just have contracts built for different amounts, or maybe ones that take variable amounts. I haven't thought too deeply about exactly how it would be used; my purpose isn't to be too stringent about how it should be used, but rather to provide a platform that works and then let other people build on top of it. So you have to think: okay, if I can either get ownership over someone's account in a very controlled way, + +[50:00] Through contracts, only through contracts, or I can give ownership of my account, but only through strict contracts, what does that allow for? What are the capabilities there? There are tons of them, and subscriptions would be one. With fee bumps, I don't know; it will be interesting to see if fee bumps get rid of some of the issues with recurring revenue, since you can pay sequence numbers and fees from different accounts now. Some of the issues we had there may already be gone; I haven't looked into it too deeply. But when it comes to complicated, Turing-complete recurring revenue models, where you need to do much more than just pay fees and sequence numbers, where you actually need to do some external logic, or take account of the state of the person's account,
you know, like refunds, or "oh, they partially paid, and now we have to perform some logic around a partial payment," or upgrades, more Stripe-like functionality, then you could use something like this + +[51:00] To accomplish that, through the Turing signing server smart contract logic. And again, do you need to do that? I don't know. That depends on how much your users need and value a decentralized option for something like this; maybe they do, maybe they don't. That, ultimately, is up to the users and what they expect out of the services they're making use of. All right, do we have any other questions? Those were good questions. And again, this isn't set in stone; this is very open to discussion, and we don't want to spend our time doing the wrong thing. But at the same time, all you have to do is spend a little time looking at Ethereum to realize that Turing-complete smart contracts are a very powerful, very valuable tool, and I think it's worth taking a look at whether there's an opportunity, whether it makes sense for us to include some sort of functionality like this: functional + +[52:00] Feature parity that would allow for decentralized transaction creation, where you don't have to trust the person who's creating the transaction, because you can mitigate that, you can delegate that to other parties. All right, super duper. I appreciate everyone hanging out. Again, feel free to reach out on Keybase or Twitter with follow-up questions. I'll try to get working links to these demos, as well as some API docs, up for people to poke around with. If you want to get involved building these things or testing them out, be sure to reach out. Thanks again for coming out, and we'll catch y'all later. Bye. + +
diff --git a/meetings/2020-07-15.mdx b/meetings/2020-07-15.mdx new file mode 100644 index 0000000000..7814403237 --- /dev/null +++ b/meetings/2020-07-15.mdx @@ -0,0 +1,149 @@ +--- +title: "Stellar Development Foundation Q2 in Review" +description: "A quarterly review covering ecosystem growth, anchor expansion, protocol upgrades, and developer-focused improvements across the Stellar network." +authors: [denelle-dixon] +tags: + - community + - CAP-15 + - CAP-18 + - CAP-23 + - CAP-27 + - CAP-28 + - CAP-30 + - CAP-33 + - SEP-30 + - SEP-31 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This quarterly review walks through progress across ecosystem growth, product development, and network operations, alongside updates on public engagement and regulatory participation. Leaders outline how recent protocol upgrades and ecosystem proposals are shaping a more usable, scalable, and compliant network for real-world financial use cases. + +The session also highlights how anchors, wallets, and application-layer tooling fit together: improving user experience, reducing operational friction for businesses, and preparing the network for higher-volume, regulated activity without compromising decentralization. + +### Key Topics + +- Samsung Blockchain Keystore integration enabling secure private key storage on supported Galaxy devices and opening a new distribution channel for Stellar-based applications. +- Anchor ecosystem growth focused on opening real payment corridors, with new and expanding anchors across the U.S., Africa, and Latin America. +- Vibrant wallet beta and SEP-30 account recovery as a path toward mainstream-friendly key management without third-party custody. +- Enterprise Fund investments supporting ecosystem companies, including cross-border payments and next-generation financial platforms. +- Network growth metrics showing steady account counts, rising payments, increased DEX volume, and stable validator participation. +- Protocol 13 upgrades: + - Fee-bump transactions allowing applications to cover user fees and validators to respond to changing network conditions (see the sketch after this list). + - Fine-grained asset authorization enabling compliant issuance without disrupting markets. + - Multiplexed accounts simplifying exchange and custodial account management. +- Upcoming Protocol 14 features: + - Claimable balances for payments to unprepared recipients. + - Sponsored reserves allowing applications to cover account minimums for users. +- SEP-31 anchor interoperability to support end-to-end fiat transfers and travel-rule–compliant information exchange. +- Expanded policy and public engagement through global forums, virtual events, and ongoing regulator education. +- Meridian conference announced as a virtual, free event focused on global connections and real-world use cases.
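As a concrete illustration of the fee-bump feature summarized above, here is a minimal sketch using the JavaScript stellar-sdk; the accounts are placeholders, and this is not code from the call:

```js
// Sketch: wrap a user's already-signed transaction in a fee-bump
// transaction so the application's account pays the fee (Protocol 13).
const {
  Server, TransactionBuilder, Networks, Keypair, BASE_FEE,
} = require("stellar-sdk");

const server = new Server("https://horizon-testnet.stellar.org");
const appKeypair = Keypair.fromSecret("S...APP_FEE_ACCOUNT"); // placeholder

async function sponsorFee(innerTx) {
  const feeBumpTx = TransactionBuilder.buildFeeBumpTransaction(
    appKeypair, // fee source: the application's account, not the user's
    BASE_FEE, // max fee per operation the app is willing to pay
    innerTx, // the user's signed transaction, left untouched
    Networks.TESTNET
  );
  feeBumpTx.sign(appKeypair);
  return server.submitTransaction(feeBumpTx);
}
```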
+ +### Resources + +- [CAP-15: Fee-bump transactions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md) +- [CAP-18: Fine-grained control of asset authorization](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md) +- [CAP-23: Claimable balances](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md) +- [CAP-27: Multiplexed accounts](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) +- [CAP-28](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0028.md) +- [CAP-30](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0030.md) +- [CAP-33: Sponsored reserves](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) +- [SEP-30: Account recovery](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0030.md) +- [SEP-31: Anchor send/receive interoperability](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0031.md) + +
+ Video Transcript + +[00:00] Okay, let's go ahead and get started. Thank you, everyone, for joining today's Stellar Development Foundation quarterly call. We are very excited to have you all on today. We've got an exciting agenda, a lot to cover, because SDF and Stellar really had a big second quarter of 2020. Denelle Dixon, our CEO, is going to kick things off in a moment with an announcement about our work with a Fortune 15 company, and to share some other highlights and achievements from Q2, and she'll be joined by Bill Barhydt, CEO of Abra, to discuss the exciting work and products that Abra has been building that are powered by Stellar. Justin Rice, our head of ecosystem at SDF, is going to provide some details on our technology updates and our overall network performance, as well as share some more about the charitable giving program that we just announced with our ecosystem partner, Lumenthropy. And then our general counsel, Candace Kelly, is going to touch on SDF's growing visibility and our public engagement activities, and the work we've been doing + +[01:00] To increase our involvement in the policy dialogue. So, like I said, a lot to cover, and, of course, we'll leave some time at the end to answer any questions you have about today's presentation or the report that you should have all received shortly before the call started. You can submit questions throughout the presentation in the chat box marked Q&A on the left. With that, I'm really excited to turn it over to our CEO and executive director, Denelle Dixon, for the moment we've all been waiting for. Denelle? Thank you, Lauren. It's so awesome to be here. Thanks, everyone, for joining the call today. I just really love these quarterly calls; I actually really love going back and reflecting on the quarter that we just had. So I'm really excited to be here, and thank you guys for joining us. So let's get started. First, I'm super excited to announce and to highlight that in Q2 we finalized Stellar's integration into Samsung's Blockchain Keystore. Over the last several months, we've been working closely with Samsung to integrate Stellar, and I'm happy to announce that Stellar end users are now + +[02:00] Able to securely store private keys on select Samsung Galaxy smartphones. I've been saying this a lot lately, but key management is an important part of how we will drive our technology into the mainstream, and Samsung provides a key management solution that is user friendly and will ultimately drive greater adoption of blockchain technology, because it's so easy to use. It's designed with the user in mind, which is just a really important thing that we need to continually focus on in blockchain. This integration is also a significant development for the great ecosystem of applications built on our network. It's going to allow Stellar ecosystem developers to create blockchain apps and services for Samsung Galaxy smartphones, in addition to existing Stellar-based products and applications. It just opens up a whole new network of users that can benefit from the combined innovation of Stellar and Samsung. We already have four Stellar-based businesses adopting the required SDKs to make their applications available in the Keystore. This includes DSTOQ and Satoshi + +[03:00] Pay, Litemint, and Nodle. But our work with Samsung isn't going to stop here.
+We're committed to collaborating to expand and improve the blockchain ecosystem experience for consumers by working with developers who have built on the Stellar network and incorporated the appropriate SDKs into their applications to utilize the Samsung blockchain keystore. So together we'll partner in an ongoing effort to recruit, onboard, and support developers in the Stellar ecosystem. It's just the beginning, as we work together to empower more developers and users to leverage blockchain and the capabilities presented by this integration. We're really proud of this development, and it's reflective of the growth we're seeing in all aspects of our business. So it's clear that 2020 has been a year unlike any other. Across industries, we've all had to adapt to a new normal. Fortunately for SDF and for Stellar, we're continuing to grow and thrive even during these challenging times. We've grown our capacity
+
+[04:00] And our team, we've brought exciting new leaders onto our board, and we've made strategic advances on every core tenet of our roadmap to creating equitable access to the global financial system. And I'm just so proud of our work in these areas. So I want to just take you through the three pillars of our strategy. First, we've talked about our requirement that we foster sustainable use cases, and so we're growing our anchor footprint in regions around the world, we're building product, and we're making strategic investments to be able to support this pillar. Second, to ensure the robustness and usability of Stellar, we've rolled out an upgrade to our core protocol, we've expanded our developer tools, and our network health, when you look at the numbers, is stronger than ever- and Justin's going to go through that in just a few more minutes. And then, finally, to be the blockchain that people know and trust, we've fully embraced virtual visibility by making the rounds at virtual conferences and hosting 20 events of our own. Candace is going to share some of those highlights later in the call, in addition
+
+[05:00] To how we're getting more visible with policymakers and regulators. So the TL;DR is that we've made significant progress in Q2 on all of our goals. Our strategy seems to be working, and I'm convinced more than ever that our progress is proving this out to be a strong year for Stellar. So let me share some proof points. First, let's talk about what we're doing to foster sustainable use cases on Stellar. Anchors are a fundamental part of our network: to facilitate moving value from the traditional banking system into Stellar and vice versa, the network relies on these entities that we call anchors. Anchor services help create a world where the existing financial structure is connected and interoperable with Stellar. So this quarter we continued our focus on building strong anchors in strategic markets. This included bolstering the U.S. anchor ecosystem with the addition of Inclusive, which offers compliance as a service and accounts and payment capabilities. We also expanded
+
+[06:00] On Cowrie's success in Nigeria, and we're supporting new anchors such as ClickPesa in Tanzania, AnchorMXN in Mexico, and NTokens in Brazil. I just can't say enough how important anchors are.
+It's an essential function for enabling frictionless movement of value between fiat currencies worldwide, and we're going to continue to expand our anchor footprint in markets where the data indicates that the Stellar network can have an outsized impact, specifically for remittance companies serving businesses and individuals, lenders, and payment processors. We've also been building what we've talked about before, which is our product: a wallet called Vibrant that saw its beta launch this quarter. Vibrant is a wallet that will soon be generally available in Argentina, where users will be able to hold, share, convert, and send value. It also showcases SEP-30- Stellar Ecosystem Proposal 30- a key recovery mechanism that is very user friendly. So I encourage you all to check it out, even in the beta form. And
+
+[07:00] Then the other important way we've been fostering sustainable use cases on Stellar is by investing in businesses on the network. We made two Enterprise Fund investments in Q2: one investment in SatoshiPay, a platform for connecting the world through instant payments and one of the earliest adopters of Stellar. SatoshiPay expanded their business to tap into a growing B2B cross-border payments market, where there is huge potential for a player like them, who has been in this space and knows the opportunities and the challenges. The other investment went to Abra, a next-generation financial platform; it's facilitating Abra's integration with Stellar as its blockchain back end and its expansion of financial services that will bring new products powered by Stellar. So, to share more about this work and the progress they've made so far, I'm happy to introduce Bill Barhydt, the CEO of Abra, to the call. Thanks so much, Bill, for joining us. Thank you for having me, and good morning or good afternoon, wherever you may
+
+[08:00] Be. So I'm going to spend five minutes just giving you an introduction to Abra's business, and then Denelle and I are going to speak a little bit. So if you'd like to move into the slides. So just to give you- those of you who don't know Abra- a quick overview of the services we provide: Abra is one of the more successful- most successful- wallets in the cryptocurrency space today, although we have users now using Abra for storing fiat and moving traditional money around as well- hundreds of thousands of users in over 100 countries- now I think we're approaching 150 countries- and we've been processing a billion plus in transaction volume yearly. The team has executed extremely well over the last couple of years, and we're really excited now to take this to the next level with Stellar, and we're really excited that they've joined our investor base, which includes American Express, Foxconn, Arbor Ventures, and a Fidelity-backed fund. So just
+
+[09:00] Let me walk you through some of the details, on the next slide if you will, of how this all works. So, in order to provide this awesome service that we have, we've had to build lots of capabilities, both what we call our core capabilities as well as our core services- for depositing, earning interest, borrowing, investing, and soon paying and spending- and then, of course, all of the back-end capabilities that go into effectively running a fintech- financial technology- company. Let me walk you through a couple of those now, and I'll go to the next slide. So what everybody knows, of course, on the front end is the ability to invest, as the world's beloved cryptocurrency investing wallet app.
+We basically work with lots of exchanges and lots of partners to facilitate the buy, sell, hold, and transfer of dozens of cryptocurrency assets and now lots of other assets,
+
+[10:00] And we run a very large liquidity system to allow consumers in lots of countries to get money in and out of the system, both in fiat via the banking system- bank wires, ACH, what most of us in the U.S. would know- as well as a cash network, so that folks, for example, in Southeast Asia- the Philippines- or in Central America, like in Guatemala, can actually use cash in their pockets to get money into the Abra app and then withdraw cash from the Abra app. And to do that, we've built a network of retail partners to process deposits and withdrawals into the app. It's a very complex, difficult part of our business, but it's actually growing faster even than the bank part of the business, which is really exciting. It's been exploding the past few weeks in particular, so that's something we're really excited about. On the next slide- if we go to the next slide, there
+
+[11:00] Might be a slight delay. Can you- are you on "earn interest"? I see it. So, sorry about that. So yeah, just to give you a little sneak peek on some other capabilities: this is the interest-earning capability that we're going to be launching in the next couple of weeks. This allows consumers- for example, people who aren't even familiar with cryptocurrency- to store dollars and earn significant interest on those dollars, and actually our existing crypto users can earn interest on their deposits as well. We'll be supporting staking currencies to earn interest there, and other currencies over time. So on the next slide, you can see that, in addition to the interest earning on the front end, we've built a very sophisticated lending system where we have very large institutional partners that
+
+[12:00] Are vetted in our risk management processes and are basically borrowing those funds in order to generate high yield for our consumers- and that includes everything from miners, hedge funds, large exchanges, and other loan originators. And this is a good segue into what we're working on with Stellar. So, if you'd like to go to the last slide: really the next phase for Abra- as I alluded to, for us to take this to another level- is building an entirely new part of our business in order to facilitate the movement of funds globally in real time using the Stellar network, and what that will enable for us is all forms of global lending. The first generation of lending that you hear about in the crypto world has to do with people leveraging themselves to go long on crypto, or people
+
+[13:00] Borrowing against their Bitcoin or Ethereum holdings. We want to take this to another level and use the Stellar platform to truly enable traditional banking applications at global scale: trade finance, right, cross-border P2P loan origination- loans like that, for example, that are very difficult for people in the West to take advantage of in many cases, we think are totally possible now, leveraging the power of crypto and the Stellar platform. So this is just a quick peek at what's coming and what we're really excited to be working on with Stellar, so I'd be happy to answer any questions. Well, thanks, Bill. I love the idea of me being the one to ask you questions- I've been on your podcast, so now it's my turn. I get to ask you a few.
+One of the things I wondered is: you know, we talk a lot about financial inclusion in blockchain, and I just wondered, from your standpoint, how is Abra focused on financial
+
+[14:00] Inclusion, and how do you think about that from your business side? That's a great question. We see financial inclusion as basically leveling access and creating a level playing field for access globally, so that somebody in- I mentioned the Philippines earlier- you know, a farmer in the Philippines, in Mindanao, for example, where we actually have users at Abra, can access the same services in a banking app as an investment banker in San Francisco, which is a place where we also have users. And I think Abra is one of the first companies to really try to create a level playing field across all of these different geographies in a way that doesn't really differentiate from person to person. Obviously, how people might get money in and out of the system would be different for a cash user in Southeast Asia versus a banked consumer in San Francisco. But, other than that, the core services they want- in terms of the ability to invest and earn, the
+
+[15:00] Ability to borrow, the ability to send and share funds- they're all the same. The amounts might be different, but the ideas and the basics of what they want are effectively the same, and our goal is to enable that globally. And do you think that, in our industry, we're already making strides to really push this notion of financial inclusion forward? Well, I think it's a mixed bag. I think that there are a lot of people at the bottom of the income pyramid who've done fantastic work in building microfinance services. That's really hard- it's very expensive, it's generally not self-sustaining, with some exceptions; in Mexico and India, I think, we've seen some examples of how microfinance can be self-sustaining- but when you move into the middle of the income pyramid, which is where Abra is more focused, it really becomes more about creating a holistic, globally oriented user experience that most
+
+[16:00] Banks don't understand. They focus on bank branches; they focus purely on compliance- which is obviously very important, but it has to fit in with the user experience- and we're starting to see this in some of these challenger banks, particularly in Europe and in Latin America, that are gaining traction, and that's very encouraging. And I think you're going to see a whole other generation of these challenger services now that are just going to leave the banks in the dust. Okay, so now I want to move a little bit back to Stellar, and I would love to hear your thoughts about why you selected Stellar and why you thought Stellar was a great entity- a great chain- to build on. Sure. I think, for us, we're big believers in leveraging the power of cryptocurrency technology, blockchain technology, and now stablecoins, which have become kind of the flavor of the month- but it's simply a way of leveraging cryptocurrency technology to represent traditional value on
+
+[17:00] A blockchain to facilitate real-world banking applications. The first generation of all of this, whether it's DeFi- which is the flavor of the month- has all been around crypto for the sake of crypto, and that's fine- we needed to get where we are somehow- but we're excited about making this globally accessible for traditional banking applications. Many challenges there.
+One of the biggest challenges we faced in just the kind of 1.0 of Abra was scalability. A lot of what we originally built, for example, was built on Bitcoin, and it just wouldn't scale, right? So what we need is the ability to have a platform that can create these stablecoins but can scale to truly global applications in time and space for us, in a way that a lot of the first-generation platforms could not, and that's where we think Stellar truly shines. All right, I love that. Now let me just ask you one more question before we turn it
+
+[18:00] Over to Justin. If you think about the rest of the year- and what a strange year it's been- what is the one thing that you would love to see in this space change and develop by year end so that we could actually blow things up in 2021? Yeah, that's a great question. So a lot of the first generation of stablecoin products have been very U.S. dollar centric- there are a few examples of other things- but I would really like to see some major G10 currencies really go forward, or companies and startups start to develop some G10 currencies beyond the dollar using stablecoin technologies like Stellar or others, and start to see a lot of traction in forex applications, money transfer, remittances, cross-border business payments that start to take advantage of this ability to do on-the-wire money transfer. If we see that, then we know that we have real proof points for crypto being
+
+[19:00] Useful in the- quote unquote- real world, and we know it's coming. It's just a question of: is it going to happen in the next few months, the next year, et cetera? But that would personally excite me the most. It may sound boring, but that's what it's going to take to make this really useful at global scale. Awesome- that would be great to see happen. Thank you so much, Bill, for joining us, and I know that you'll be available afterwards for questions as well. So at this point I'm going to turn it over to Justin, who's going to walk us through some network stats and other things that he wants to share with us. Great, thanks, Denelle, and thanks, Bill. I'm Justin Rice, head of ecosystem at SDF, and I'm super excited to be here to share more about the continued growth of the network and the development of Stellar technology. So Stellar is open source and open participation, and many of the advancements SDF makes are in collaboration with our ecosystem. In these uncertain times, when it's clear so many are struggling, we've been asking ourselves how we can work with our ecosystem to make a difference.
+
+[20:00] Now, of course, we believe the technology we're working on will help address some key underlying issues over the long term, but we also continue to ask ourselves: what can we do to help today? So we've decided that part of our short-term answer is to continue our partnership with Lumenthropy, which is a Stellar-based platform for charitable donations- this time to rally our community to support an organization deeply relevant to today's challenges: the USF Institute for Nonviolence and Social Justice, which is a 501(c)(3) driving research, education, and advocacy to advance the practice of transformational nonviolence. So for the entire month of July, we will match lumen donations made by our community to the institute, up to two million lumens.
+We kicked off the campaign with a 100,000 lumen donation, and we're excited to see what our community will contribute to make the world a better, more generous, and more compassionate place. Now let's take a look at this quarter's numbers and talk a bit about what they mean. So here are some key network stats, and
+
+[21:00] We'll sort of go through them quickly. The number of accounts on the network has been stable, hovering around 4.5 million total. We've seen a slight increase in the number of daily operations, up 9% from Q1, and that represents solid network throughput and speaks to the scalability of the network. There was a significant uptick in average daily trading volume on Stellar's decentralized exchange this quarter: we saw about 5.9 million lumens' worth of trades per day, which is a 70% increase from last quarter. The total number of payments has also grown consistently over Q2, from 9 million to more than 13 million. As of June 30th, there were 7,893 on-chain assets, which is a lot. I think it's important to note that we're working on a method to distinguish and measure meaningful assets- those are assets that contribute to the connected financial infrastructure that we're trying to build, assets very much in line with the anchor strategy that Denelle mentioned earlier. And many of these 7,000-plus on-chain assets
+
+[22:00] Will be beyond the scope of our area of focus, but one thing is for sure: people find it very easy to issue assets using Stellar. Lastly, let's take a look at where the network is in terms of health and decentralization. There are currently 23 tier 1 validating nodes and about 120 nodes total. That's about the same as last quarter, and what we mentioned then bears repeating: the network would continue- it would keep going- even if SDF turned off all of its nodes. What's more encouraging is that we're seeing more organizations in the pipeline to join the ranks of the tier 1 validators, and that's because there are more businesses that are invested in the network, want to increase their engagement, and are capable of doing so. Those tier 1 validators are fundamental to the health and stability of the network. They're also important to its governance, and they help drive important upgrades to the network, like the adoption of Protocols 12 and 13. So, Protocol 13: on June 18th, the Stellar public network upgraded to Protocol 13. Like
+
+[23:00] All important network-wide settings, protocol version is decided by validating nodes, who vote for and agree to new protocol versions the same way they vote for and agree to transaction sets. A protocol upgrade implements a bundle of Core Advancement Proposals, also known as CAPs, and Protocol 13 included five. The first two, [CAP-28](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0028.md) and [CAP-30](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0030.md), are really technical optimizations. One involved pre-auth signers, the other involved operation response codes. We won't get into the nitty-gritty details here, but the key takeaway for those two is that they both helped Stellar Core- and therefore the network as a whole- run a little leaner. But what made this release really exciting was the addition of three powerful new features. The first is fee bumps- [CAP-15](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md)- which empower app builders to cover user fees so they can create simpler and better customer experiences.
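+
+_To make fee bumps concrete, here is a minimal sketch using recent versions of the Python `stellar_sdk`. The keys, destination, and amounts are illustrative placeholders, not details from the call:_
+
+```python
+# Sketch: an app covers a user's fee with a CAP-15 fee-bump transaction.
+from stellar_sdk import Asset, Keypair, Network, Server, TransactionBuilder
+
+server = Server("https://horizon-testnet.stellar.org")
+user = Keypair.from_secret("S...USER")  # hypothetical user keypair
+app = Keypair.from_secret("S...APP")    # hypothetical fee-paying keypair
+
+# The user builds and signs an ordinary payment...
+inner = (
+    TransactionBuilder(
+        source_account=server.load_account(user.public_key),
+        network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
+        base_fee=100,
+    )
+    .append_payment_op(destination="G...DEST", asset=Asset.native(), amount="10")
+    .set_timeout(30)
+    .build()
+)
+inner.sign(user)
+
+# ...and the app wraps it in a fee bump, paying the fee on the user's behalf.
+fee_bump = TransactionBuilder.build_fee_bump_transaction(
+    fee_source=app,
+    base_fee=200,  # may exceed the inner fee, e.g. if the network minimum rises
+    inner_transaction_envelope=inner,
+    network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
+)
+fee_bump.sign(app)
+server.submit_transaction(fee_bump)
+```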
+Fee bumps also allow validators to increase the minimum network fee without invalidating pre-signed transactions, thereby giving them the flexibility to respond to changes in network usage so
+
+[24:00] They can ensure that Stellar continues to function as a fast, efficient, and decentralized system for payments. The second feature in Protocol 13 is fine-grained control of asset authorization- [CAP-18](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md)- which grants issuers of regulated assets the ability to approve user transactions on a case-by-case basis without cancelling open orders on the books. So this optimizes Stellar as a great option for issuing assets with high compliance requirements, like security tokens. And the third Protocol 13 feature is first-class multiplexed accounts- [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md)- which allows exchanges and custodial services to map a single Stellar account to multiple users without relying on inefficient workarounds. We've learned a lot by talking to some of the larger crypto exchanges, and this change will not only make their lives easier- it will also pave the way for businesses that want to manage client accounts to build on Stellar. In addition to these changes at the protocol level of the network, we've also made some important advancements for developers and businesses at
+
+[25:00] The application level. For example, we introduced SEP-30- Denelle mentioned that earlier. It's a seamless, user-focused solution for key management and key recovery. As Denelle mentioned when she was talking about Samsung, key management is one of the biggest impediments to user adoption, not just for Stellar but for crypto generally, and it's also one of the most difficult problems to solve, for keys aren't something you can remember. They're complicated strings of letters and numbers, and in most crypto apps, if you lose your key, you permanently lose access to your account- and that just doesn't work for most human beings. You can't introduce an app to the general public and grow it at scale if a small human error can cost you everything. There has to be some sort of recourse.
+I'm not going to get into all the technical details, but SEP-30 solves the problem of key management by leveraging Stellar's built-in multisig capabilities to allow a user to easily recover access to an account without ever giving control of that account to a third party. As Denelle mentioned, Vibrant is built on SEP-30, and I tested it last week: all
+
+[26:00] I had to remember to access my account was my phone number. The implications of SEP-30 could be huge, right? It paves the way for a new breed of user-friendly apps on Stellar, and we're really excited to see what businesses and developers can do. We also did a complete overhaul of our developer documentation. The docs are now leaner, better looking, and more focused on today's use cases. They're organized around learning paths, and we'll continue to add more learning paths as network use cases evolve. Stellar's always been known for having good documentation, and we kept everything that was working with this new update- we just made it even easier to learn how to build on Stellar. That's it for me, so with that, it's my pleasure to turn it over to Candace Kelly, our general counsel, to share with you what we've been doing to be the blockchain people know and trust. Candace? Thanks so much, Justin- it's great to be here again. I'm Candace Kelly, the general counsel. It's really encouraging to see all the growth in our network and in the technology. We've also been busy this quarter getting vocal and visible- virtually,
+
+[27:00] Of course, in this pandemic time. On the policy front, we became a platform partner in the World Economic Forum Centre for the Fourth Industrial Revolution. As a member, we're on the Digital Currency Governance Consortium, which is a working group of industry stakeholders addressing key questions and governance gaps posed by new forms of digital currency, including stablecoins. The consortium has a number of working groups, and we're focusing specifically on digital currency use cases. The working group will assess where digital currency may add value, including a number of areas that are right in our strike zone: for example, the potential for digital currency to replace cash, particularly in pandemic times; issues related to the need for lower-cost remittances and cross-border payments; financial inclusion; money laundering and the strengthening of AML/CFT capacities; domestic and cross-border regulatory oversight and coordination; monetary control; and central bank digital
+
+[28:00] Currencies. And since, as Justin just mentioned, Stellar was designed for stablecoins and CBDCs- we find that it's very easy to issue assets on the Stellar network- we're looking forward to seeing how we'll advance this important work of the World Economic Forum. And then, to strengthen SDF's capacity to join more policy discussions and forums like the WEF, and to raise awareness for Stellar within the policymaker and regulatory spheres, we also hired a new head of policy and government relations, who's focused primarily on engagement in D.C.- but he's already getting up to speed on policy issues that reach well beyond our borders as well. But we haven't just been getting vocal with policymakers. We devoted a lot of Q2 to both joining and hosting virtual events. We participated in eight industry events. We ran a Stellar foundation session at Consensus: Distributed, where we shared the vision of Stellar in 2020
+
+[29:00] And also highlighted businesses built on Stellar. Both Jed and Denelle were on stage at BlockDown 2.0: Jed was on a mastermind panel on
+interoperability, and Denelle delivered a keynote on her favorite topic of the moment, key management. Denelle also presented to a global audience of entrepreneurs at Road to TiEcon in June. We also focused on expanding our own capacity to host virtual events, which resulted in a total of 20 events- 16 in English and four in Spanish- throughout the quarter, ranging from webinars and workshops to round tables and engineering talks. Some of the highlights include a webinar on anchor basics- the business and benefits of being an anchor; a webinar on compliance and the Stellar network, supporting the announcement of our partnership with Elliptic; an engineering talk on Kelp, which is a free, customizable, open-source trading bot for the Stellar universal marketplace; and
+
+[30:00] A roundtable discussion on ecosystem standards for creating payment corridors, which ultimately resulted in the creation of SEP-31, which helps anchors in the network with sending and receiving transactions and is a tool that helps them to comply with the Travel Rule. In addition to our aggressive efforts on the communications front, SDF executed a series of paid marketing initiatives to further awareness and understanding of the Stellar network. Our two speaking appearances at the Consensus: Distributed conference were complemented by sponsorship activation within the virtual conference as well as within a CoinDesk podcast series. Additionally, SDF advertised on the Unchained podcast hosted by Laura Shin, and we've also begun testing the effectiveness of online search and display media. We also started preparations for our biggest virtual event of all, which is our second annual Stellar conference: Meridian is set for November 16th
+
+[31:00] Through the 20th, and in the context of the ongoing pandemic, we've decided that the event will be held virtually, and it will be free of charge. The theme of the event is global connections to solve real-world challenges. Stay tuned for updates like speaker and program announcements by signing up at `Meridian.Stellar.org`. So that's it for me, Lauren- I'll hand it back to you. Thank you, Candace, and thank you, everyone. We're now ready to open it up to Q&A, so if you have questions, please submit them in the chat box in the upper left-hand corner of your screen. It looks like we already have one that we can kick off with. Denelle, I think this one is probably best suited for you: to what extent has SDF's work been impacted, or not, by the pandemic? That's such a great question, and it's such a hard question, because, as you can see by what we've done in Q2, during the heart of the pandemic, I think
+
+[32:00] We've done awesome work, and I'm so grateful for all of our team members, who've spent so much of their time focused on the work even in a time when there are so many other things in the world to focus on. So we've kind of nailed it in terms of the work that we've done, but I can't pretend that this hasn't impacted SDF in particular. From an SDF standpoint, we were open to remote work already- we had lots of employees that were working remotely- and so this has just broadened that and made it so that we're all working from our homes and in our home offices, and really, I think, doing a great job working together. But I have to say, it's hard, and I think everybody's experiencing this all around the world: it gets hard when you don't have as much human contact as you're used to. So we really are trying to focus
+on getting folks to step away from their computers and to have some of their personal and alone time, so that they can really refuel what they need in their lives, so that they can still give back to us
+
+[33:00] But also to their families. I will say that I think we're very mindful that, even though SDF has been pretty strong and done some really great work- and we're stable in terms of our financials- there are others in the ecosystem who've been impacted. So we keep a really close eye on how the ecosystem is doing, just to make sure that we have an understanding and that we can do what we can to support the ecosystem, with respect to any kind of work we can do or documentation we can provide to make things seamless and easy. So, all told, I think we've done a fairly good job of really staying focused during this, but I can't deny that it's just hard- I think it's hard for all of us. Thanks, Denelle. I think the next question we have that came in here is for Bill at Abra. We made the announcement back in May about how you're going to be using Stellar, so maybe you could explain what's happened from May to now. Sure. So this is happening in phases
+
+[34:00] For us. Our goal is to build a series of global banking services which also take advantage of the Stellar network, and the first phase for us is our lending business, and that's already launched- we're basically building relationships with lots of institutional players who will be partnering with Abra in the lending space. That tends to be stuff that we don't necessarily announce but is essential for building up liquidity in our network. And then the technology part of this is something that we'll be working on mostly in the second half of this year, and the launch of that is actually related to the question Denelle just answered- to the state of the lending markets because of COVID. For example, if we're looking
+
+[35:00] At cross-border P2P loan origination, or cross-border trade finance applications, a lot of what we end up doing from a risk management perspective will depend upon the state of the markets. And- this is the second part of your question that I see, Benjamin- this extends what's working in DeFi way beyond just crypto for the sake of crypto, to make it truly useful in traditional finance applications. My favorite is cross-border P2P loan origination, because this means that loan originators- there are many P2P loan originators, for example, now, who are well funded in Southeast Asia and Latin America, but they have to source capital locally, and it's often very expensive because the traditional banks won't play- by opening that up to other forms of capital, that means we can eventually drive the
+
+[36:00] Rates down for small business and individual loans in those emerging markets. And I think that leveraging stablecoin technology, for example, to be able to move that money around very quickly is an essential part of enabling that over time, because we just can't do it fast enough via the banks. Thank you. So I think the next question we've got here is for Denelle: you mentioned having a growing anchor footprint- what's been the driver behind getting more anchors on board, and do we have a goal for the number of anchors that we would ultimately get to? So anchors are my next favorite topic, and SEP-30 and anchors are kind of close in terms of the needs and the value that
+anchors- and then also key recovery- bring to the network, and to blockchain generally. Having financial institutions on the network and creating corridors so that businesses and individuals can benefit
+
+[37:00] From these cross-border transactions is just crucial for the success of the Stellar network. So the driver has been, very clearly, that we need them, and the other driver has been demand: we have corridors that are already open, and then we have demand from some of the anchors to say, hey, we would love to have another anchor on the other side- in Brazil, for example, or in Mexico- so that we can actually have those transactions go back and forth. So it's the increasing volume on the network and the increasing requirements that we see to open up more and more corridors that are driving the need for anchors. This is just a really important part of what we need to do, and what I see as our value-add to the network, from an SDF standpoint, is that we can help these financial institutions to integrate and to seamlessly operate with the network and with the other players on it. And I don't look at this in terms of numbers, because I think it's more important to think about it in terms of corridors being open. So we're actually working now to determine-
+
+[38:00] We're doing our Q3 OKRs right now- what we want to see in terms of growth, but I'm not going to lie: I would love to see 10, 20, 30 more anchors on the network in the next six months. Now, that's just a huge number to bring on, but it would just be fantastic to see more and more corridors open up, so that we can have more and more room for other businesses to join and participate in what we see as just a growing ecosystem. Thanks, Denelle. I think the next question here is probably for Justin: are there any technical advancements, like future CAPs or SEPs, that you anticipate are in the works? Yeah, that's a great question. There are several- I'll talk about a few of the most important ones. Protocol 14 is well underway, and it involves two CAPs that also introduce great new features: [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md), which introduces claimable balances- they
+
+[39:00] Allow sending a payment to an account that is not yet prepared to receive it- and [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md), which introduces sponsored reserves, essentially allowing an app to cover user reserves so users don't have to understand or manage a lumen balance. Both these things work together to basically make it so that you can build apps that manage all of the crypto aspects of Stellar and conceal the complexity of crypto from users- so people can build payment apps that look and feel like payment apps, rather than like crypto payment apps. Those two will be part of Protocol 14, which is in the works, and once it's done, we'll start rolling it out and working with the ecosystem to get the network upgraded. The second big one that we're working on- and we've been working on it for a while- is a SEP: again, a sort of ecosystem-level solution for a problem that many people building on Stellar face. This one is called SEP-31, and essentially it's designed in conjunction with some current anchors. It enables two anchors to interoperate, to pass required information, and
+
+[40:00] Makes it so a user can send fiat from their bank account
+directly to the recipient's bank account. Again, this conceals the complexity of crypto from users, and it also allows these anchors to comply with the Travel Rule- so they comply with an important regulation that requires financial institutions to share certain info about parties involved in a transaction. SEP-31 is a spec that has been worked out, and in fact we just added SEP-31 functionality to Polaris, which is a reference implementation that people can use so that anchors can set this up really easily. So, I'd say those are the two big things. Thank you, Justin. Okay, I think we have another question that came in here, and this one, I think, is for Bill. Some people saw the news about the announcement of the settlement you had with regulators yesterday- just a general question about whether you anticipate any impact from that on your business. Sure,
+
+[41:00] That's a good question. Those agreements date back to, I believe, the November-December time frame, and we've long since moved on in our business. So basically, the business that we've been running for the last seven months has basically been post those agreements- it just happens to have been yesterday that the SEC and CFTC made the announcements. We actually have no visibility as to when they're going to do that- it's kind of a black box- so that was just a coincidence, and, as I said, Abra has long since moved on. I'm not allowed to comment on the content of the agreements, except to say we're fine with where things stand- our business, in the U.S. in particular, is doing really well, and we're really happy with how things stand. So, no concerns. Great- thank you very much for sharing that. And I think we have one last question coming in here, and it's for
+
+[42:00] Candace. Candace, what kind of policy objectives are you looking to achieve by engaging in D.C. and with, like, the World Economic Forum, as you mentioned in your part of the presentation? Thanks, Lauren- that's a good question. I think this is a really exciting time in the policy world of cryptocurrency. I will say that I think the announcement of Libra last year, and then the pandemic and the discussions around the digital dollar and getting stimulus checks out in a secure and efficient way, have really raised the profile and the interest level on the Hill and in D.C., with both regulators and policymakers, in terms of first just understanding what it is. I think that there were a lot of folks who were quite well educated on cryptocurrency and blockchain technology, but there are a lot who are still getting up to speed on that, and
+
+[43:00] There's an urgency and an interest in doing that, which makes it really a great opportunity for us. And I think that SDF, as a nonprofit, is in a unique role in that we really hope to be able to be a credible voice in just helping in that education department. Obviously, it starts with education, and then participation- having somebody now, a full-time person, who can really focus on these issues is fantastic- and then advocacy is the ultimate goal. So both within D.C. and then, as I mentioned, it's impossible, with the global nature of our business and this technology, to not be jumping into conversations in all different jurisdictions, as regulators across the globe are trying to figure this out and take advantage of blockchain in a safe and compliant way. So the World Economic Forum is one way that we're doing that, but we're also engaging with anchors and partners across the globe,
+
+[44:00] Making sure that we are being helpful in both educating and advocating
+on the policy front. Great- well, I think that is all the questions we had. Thank you to our speakers for joining today. If anyone on the call has follow-up questions, feel free to email us at `media@stellar.org`, and thank you, everyone, for joining us today.
+
+</details>
diff --git a/meetings/2020-07-24.mdx b/meetings/2020-07-24.mdx
new file mode 100644
index 0000000000..5a7c3502bb
--- /dev/null
+++ b/meetings/2020-07-24.mdx
@@ -0,0 +1,184 @@
+---
+title: "How to Build an Anchor with Polaris"
+description: "A hands-on engineering talk showing how Polaris (a reusable Django app) accelerates building a SEP-24 anchor by providing standard endpoints, UI flows, and integration hooks for KYC and banking rails."
+authors: [jake-urban]
+tags: [tutorial, SEP-24, SEP-31, SEP-1]
+---
+
+import YouTube from "@site/src/components/YouTube";
+
+
+
+Interested in becoming a Stellar anchor but aren't sure where to start? This talk provides a step-by-step walkthrough of setting up Polaris, configuring a Django project, and implementing the integration points that let you connect your own KYC requirements and off-chain banking rails. The demo focuses on getting a working SEP-24 deposit/withdraw flow quickly, then explains where anchors typically customize behavior for production deployments.
+
+### Key Topics
+
+- Polaris as a plug-and-play Django app that implements the “standard” parts of SEP-24 while exposing integration hooks for anchor-specific behavior.
+- SEP-24 flow from a user’s perspective (interactive deposit/withdraw pages) and how wallets launch the anchor UI (sketched below).
+- Integration points Polaris cannot automate: banking/payment rails, KYC/user data collection, UI customization, and transaction state handling.
+- Setup walkthrough: creating a Django project, installing `django-polaris`, enabling required apps/middleware, and configuring static assets for the built-in UI.
+- Implementing SEP-1 via a TOML file and how anchors populate organization + asset metadata for clients.
+- Core Polaris background jobs (e.g., polling pending deposits, watching transactions) and why anchors must run more than just the web server in production.
+- Deposit/withdraw lifecycle: how pending deposits are detected off-chain, then settled on-chain; how withdrawals reverse the flow by paying users off-chain after on-chain receipt.
+- Development workflow tips: local-mode considerations, using Docker/Docker Compose to run the web server plus worker processes together.
+- SEP-31 support direction: how Polaris can help receiving anchors interoperate for anchor-to-anchor transfers (remittance-style flows), reducing wallet/user complexity.
+- Tradeoffs and ecosystem demand: why Polaris is Django-based today, and what would need to change to justify a Node.js equivalent.
+
+### Resources
+
+- Documentation: [django-polaris.readthedocs.io/en/stable/](https://django-polaris.readthedocs.io/en/stable)
+- Code: [github.com/stellar/django-polaris](https://github.com/stellar/django-polaris)
+- SEP-1: [Stellar TOML](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0001.md)
+- SEP-24: [Interactive Deposit & Withdrawal](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0024.md)
+- SEP-31: [Cross-border payments / anchor interoperability](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0031.md)
+
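+_As a rough sketch of the wallet side of this flow, here is how a client might kick off an interactive deposit. The anchor domain, token, and account are hypothetical; the endpoint path follows SEP-24 as Polaris serves it:_
+
+```python
+# Sketch: a wallet starting a SEP-24 interactive deposit with an anchor.
+import requests
+
+ANCHOR = "https://testanchor.example.com"  # hypothetical anchor domain
+JWT = "eyJ..."  # session token obtained beforehand via SEP-10 web auth
+
+resp = requests.post(
+    f"{ANCHOR}/sep24/transactions/deposit/interactive",
+    data={"asset_code": "SRT", "account": "G...USER"},
+    headers={"Authorization": f"Bearer {JWT}"},
+)
+resp.raise_for_status()
+body = resp.json()
+
+# The anchor returns the URL of its interactive deposit/KYC pages; the wallet
+# opens it in a webview and tracks the transaction by the returned id.
+print(body["type"], body["url"], body["id"])
+```
+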
+<details>
+ <summary>Video Transcript</summary>
+
+[00:00] Hey guys, welcome to the engineering talk. We are going to begin momentarily. One thing I will say- and I think our host has posted it in the channel- but if you have questions while I'm going through this talk, go ahead and scan the QR code and you can send questions over, and I'll take a look at them at the end of the discussion. Okay? So yeah, welcome. This is a talk about Polaris, which is a Django application and framework that allows you to build anchor servers according to the standards that we define, which are called Stellar Ecosystem Proposals, or SEPs. So django-polaris is a tool that we built
+
+[01:00] So that anchors can get their anchor service up quickly and with less work required. So I'm going to go into the details of how to build this anchor, but before I do, I'm going to go over a brief presentation that just describes what it is, how it works, and what you can expect in this presentation. So let's go ahead and begin. So, like I just said, Polaris is a Django anchor app- or, I'd like to say, it's an extendable Django app- which means that, if you know anything about Django- and hopefully you do for this presentation, because I'm going to be going into Django code- Django is a framework that allows you to plug and play different applications within a project, and so Polaris is an app- a reusable app- that you can plug and play within Django projects, and it provides an interface, an API, that developers can use to
+
+[02:00] Insert and customize the way that the anchor server responds to and handles information. The reasoning behind this is that SEP-24, which was the original standard that we implemented this for, has a lot of things that are standard about it. Every anchor is going to be doing similar things: they're all going to have info endpoints, they're all going to have what the SEP describes. And when I say "the SEP"- sorry, I was going to break out of my slides; actually, I'll stay in the slides until afterwards- SEP-24 is a standard, and it defines an API for clients to hit in order to interface with the anchor, and every anchor that implements SEP-24 has the same endpoints. The difference, though, is that every anchor has pieces of functionality that are custom to them. So what Polaris does is implement the stuff that's standard, that everybody's going to do, and then it allows people to customize their own instances with their- you
+
+[03:00] Know, their- unique situation. So, overall, it's less time for the anchor to build- people can typically get this up within an hour, because we're going to do it right now- and we maintain it. The SDF maintains it, so we're going to continue to upgrade this, make sure it's up to date with the SDK and with SDK upgrades, and it's open source. Everything that I'm going to use right now is viewable on GitHub, and we have links for the documentation and code in the following slides. So, in order to understand Polaris, we need to talk about integrations. Polaris offers integrations- or integration functions or classes- that allow anchors to register custom functionality with Polaris. So Polaris is going to facilitate, you know, the API endpoints and build out what a SEP-24 server would be. But there are things that Polaris can't automate, right? Polaris can't automate your banking or payment rails. If you're an
+
+[04:00] anchor in Brazil- you know, me as the developer of Polaris,
+I don't know what bank you're connecting with. I don't know what API you're using. I don't know anything about that. So what you need to do is inject your own banking and payment rails code into the Polaris framework. In the same way, we don't know what information you need from users. So you may need an email and that's it, or you may need a photo of the ID. You may need a variety of different information- a social security number, here in the U.S.- there's a bunch of stuff that you could require in order to deposit or withdraw assets. And so KYC kind of goes hand in hand with user tracking: you want to know who's using your service, who they are, how to reach them, stuff like that. There's UI customization- so Polaris comes out of the box with a decent UI, but if you want to customize that UI, you're free to do so. And finally, there's just transaction processing. When the transaction gets to a certain state in
+
+[05:00] The flow, you may want to update something in your own data models to adjust for the change- and so Polaris can't automate any of those things. So it provides rails integration classes, for example: in your code, you would write, within the rails integration class, a function that connects to your bank and actually makes payments to users, and you're going to register that integration that you write with Polaris- and I'll show you how to do that. So that transitions into: well, how do you use Polaris? We're going to do two things in this talk: we're going to install and configure Polaris, and we're going to implement the integration points- or at least enough integration points to get the anchor server up and running- and there are going to be a few steps involved in that. We're going to create a Django project- and hopefully people who are viewing this are familiar with Django and are familiar with the SEPs. I'm going to brush over some details about the steps so you have some context, but overall this talk is going to assume that you
+
+[06:00] Know what I'm talking about when it comes to Django and Stellar Ecosystem Proposals. You're going to install the django-polaris package. We're going to add polaris, the Django app itself, to the project, and then we're going to add a bunch of Polaris settings and configuration options. And finally, we're going to register and add the asset that we're going to anchor within our database- and the asset that we're going to anchor on this anchor is our Stellar Reference Token, SRT. It's just a fake token on testnet that doesn't mean anything, but it's used as a demonstration so I can show how people build anchor services around an asset. Once we have all the configuration and installation set up, we're going to start implementing integrations. So the first thing that we're going to do is build an appropriate TOML file- and, if you're familiar with SEP-1, this is
+
+[07:00] Implementing that. It's basically a file that describes the organization that owns the anchor, and it gives a bunch of details about contact information, and stuff about the asset, that clients who are viewing the file need to know. We're going to do a little bit of banking rails- we're not going to connect to any particular bank, so we're actually going to kind of just mock this up- but the idea is that you would fill this function in with actual banking and payment rails code.
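+
+_A minimal sketch of the rails integration just described, using the integration classes from the django-polaris documentation. The bank helpers are mocks standing in for real rails code, and the exact class and method names may differ between Polaris versions, so treat them as assumptions and check the docs linked above:_
+
+```python
+# Sketch: registering custom banking/payment rails behavior with Polaris.
+from polaris.integrations import RailsIntegration, register_integrations
+from polaris.models import Transaction
+
+
+def my_bank_confirms_receipt(transaction) -> bool:
+    return True  # mock, as in the talk: no real bank is connected
+
+
+def my_bank_send_funds(transaction) -> None:
+    pass  # placeholder for a real off-chain payout
+
+
+class MyRailsIntegration(RailsIntegration):
+    def poll_pending_deposits(self, pending_deposits):
+        # Return the deposits whose off-chain funds have arrived, so
+        # Polaris can submit the corresponding Stellar payments.
+        return [t for t in pending_deposits if my_bank_confirms_receipt(t)]
+
+    def execute_outgoing_transaction(self, transaction: Transaction):
+        # For withdrawals: pay the user off-chain, then update the record.
+        my_bank_send_funds(transaction)
+        transaction.status = Transaction.STATUS.completed
+        transaction.save()
+
+
+register_integrations(rails=MyRailsIntegration())
+```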
+I'm just going to do what's required from a functional standpoint to make sure Polaris is working. Once we have the TOML and banking rails integrations set up, we're going to register those integrations, and then, finally, we're going to build the application with Docker and get it running. So hopefully that gives you a good overview of what's going to happen here. And again, remember, everything that I work on here is open source, and there's documentation available. So if you want to check out the documentation, it's available at django-polaris.readthedocs.io, and
+
+[08:00] The code is available at github.com/stellar/django-polaris, and these pictures are some pictures from our test server- our reference anchor server- that shows you how to do it. It's basically an example, and these are just some forms that Polaris provides out of the box- so this is what the UI looks like if you didn't do any customizations. Okay, and we're going to do questions after the demo, but for now I'm going to transition into coding, and I'll probably be hopping back and forth to the internet as well to reference the proposals and just demonstrate certain things. Okay, cool- we are right on schedule. So this is PyCharm. django-polaris is a Python application, and what we're going to do, as when we start off most Python applications, is create a virtual environment. So- actually, sorry, before
+
+[09:00] I dive into the code here, I do want to show you one thing, and that's a demonstration of what you can expect an anchor to look like from a user's perspective. So what I'm doing here is- this is a little demo wallet that mocks what a user would see- and we're going to walk through a SEP-24 flow before we get started, so you can just see what it's going to look like. We're just going to use our reference server that the Stellar Development Foundation runs and provides, and we're just going to walk through a deposit flow on testnet. So this is what we're going to ultimately build- we want to get to this point by the end of this talk. So I'm going to make a deposit, it's going to go through the SEP-24 flow, and it's going to open up the actual interactive page the user is going to see. So if I was a user, this is the first form that would be presented to me. I'm trying to make a deposit into my account on Stellar, and here is the amount field that's asking
+
+[10:00] Me, you know, to specify how much I plan to deposit- and so I'm going to tell them that I'm going to deposit a hundred dollars. And on testnet, anchors need to assume that deposits are actually sent, right? On testnet, everything's fake, right? So I'm not actually going to send money to the anchor- it's just going to assume, or understand, that it's on testnet and it doesn't need to wait for me to actually pay them. It's going to pay- or send, you know- Stellar funds to my account. And just to give you some more context: this config option- this is obviously the reference server that we're hitting, the URL of it- but this is the secret key of my account, and don't worry, there's nothing, you know, fancy on it- you can check it out on testnet. But I'm just going to deposit 100 SRT into my account on testnet. So let's go ahead and do that. And so now, django-polaris, after I submitted the amount, is going to go ahead and work on depositing those funds into my account.
+So it's executing
+
+[11:00] On the transaction, as you can see, and now it's complete. So if I were to go to my account right now, I could show you that I have at least 100 SRT in my account- and I'm not going to show you that, it's not worth it- but this is the page that displays all the information on the transaction. So I sent them 100, supposedly; I was charged a dollar and one cent; and the amount that I actually received in my Stellar account- the amount of SRT- is 98.99. And it tells you when it was completed, and the status, and all that kind of stuff. So this flow of being presented some forms, filling out forms, being presented updates as the transaction's submitted, and then finally getting a, you know, notification that it's complete- that's going to be the user experience. Behind the scenes, Polaris is going to be doing a lot more, and I'm going to walk through what that is. Okay, so that's the end of the demo. We're going to go into the code, and here we
+
+[12:00] Go- bear with me in case something happens, because something always happens in demos, but let's see how far we get; I think we're going to be able to do it. So, like I was saying, with any Python project, you're going to want to create a virtual environment. So I'm going to create a virtual environment right now using Python 3, and I have a little shortcut that activates the virtual environment, but let's just do it, you know, the normal way. So I'm just going to execute the activate script for the virtual environment- so this is .venv, and activate. Okay, so now I'm in a virtual environment- this little enclosed box where I can install my own packages and make sure, you know, everything that I'm doing here is contained in here and it's not affecting anything else outside my project. And before we do this, actually, I'm going to go to the documentation. So this is the Polaris documentation- it's available at
+
+[13:00] django-polaris.readthedocs.io, and it's going to walk you through how to install Polaris. Now, I'm going to update this documentation in the future- I'll probably have some kind of tutorial page that gives you a, you know, front-to-back walkthrough of basically what we're doing in this talk- but for now I'm going to hop around these settings and documentation pages, because, you know, they have different information and it's not organized in one place where I can just kind of scroll through. So the first thing that we're going to do- we're just going to copy and literally walk through these steps- we're going to install Polaris, and while that installs, I'm just going to go over kind of what it holds. You know, obviously it holds Django, and it really holds everything that you're going to need to run this application- so you don't need to actually install Django separately or anything like that. Everything comes with Polaris, and it's ready to go. So that's done now, and now that we have the django-polaris package, we have Django installed, because Polaris
+
+[14:00] Comes with everything. And what we're going to do is we're going to create a Django project. So I have this command-line tool called django-admin, which comes out of the box with Django, right? So I'm going to create a Django project- startproject- and I'm going to call it app. It's going to be super generic, okay.
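+
+_For context, the `manage.py` that `startproject` generates looks roughly like this (Django 3.x layout, slightly trimmed):_
+
+```python
+#!/usr/bin/env python
+"""Django's command-line utility for administrative tasks."""
+import os
+import sys
+
+
+def main():
+    # Point Django at the settings module generated for the "app" project.
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings")
+    from django.core.management import execute_from_command_line
+    execute_from_command_line(sys.argv)
+
+
+if __name__ == "__main__":
+    main()
+```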
Now, if I look, I actually have an app folder- you can see it in my source tree- and if you look in app, I have a manage.py script. This manage.py script is basically the entry point to all of Django's functionality, so whenever I run Python within the Django context, I'm going to run `python manage.py <some command>`. And within the app, I have all the files that are necessary for a Django application. We're not going to go over them- again, this isn't a Django tutorial; we're going to assume you know generally what this is- but + +[15:00] It's helpful to understand where we're at. So let's get started. Now that we have our Django project and we have an app set up, we're going to add Polaris to our project. The first thing that we need to do is add these three apps to the INSTALLED_APPS list in settings.py, so let's open up settings.py. This is a generated file that Django creates for you. It comes with a secret key that you're definitely going to want to keep secret, so you would never want to check this file into git as it was auto-generated; you want to use environment variables and hide those secrets from users and developers. But yeah, this is a file generated by Django, and here's our INSTALLED_APPS list, and we're just going to copy and paste the three apps that Polaris requires into the list. Now let me go over what these are. This package called corsheaders is a separate package outside of Polaris that + +[16:00] Is installed with Polaris. It ensures that your server allows requests to the SEP-24 endpoints from any service or client- it just sets the CORS policy for the server. rest_framework is another app that Polaris uses to build out its API. And then, finally, you install the polaris app itself. And because we have our own app- so this project is called app, and within the project directory we have a Django application also called app (I know the app and project terminology is going to get confusing here)- that inner app directory is essentially the app we're running right alongside the other apps in our INSTALLED_APPS list. So I'm going to add app to the INSTALLED_APPS list too. The next thing + +[17:00] That we're going to do is add the CORS middleware. That corsheaders package I told you about requires a middleware component. Django comes with a bunch of default middleware, and our documentation actually requests that the CORS middleware sit above the CommonMiddleware. This is necessary for Django reasons: middleware order matters. I'm not going to go too deep into it, but you want to make sure all the components we add are in the correct order, at least in this section. Cool. And then, finally, we're going to add a PROJECT_ROOT setting to our project. This tells Polaris where the top level is, and it's going to look for an environment file at the top level of this project. So I'm going to initialize PROJECT_ROOT, and I'm actually just going to make it the directory containing BASE_DIR. So BASE_DIR- it + +[18:00] Was generated automatically by Django, and it's actually this folder right here- or actually, is it? So there's the file; it takes the absolute path of that.
It's the directory containing that file. Oh, it's actually this file. So this is the application folder that BASE_DIR is referencing, and I'm going to take the directory that contains that directory. So PROJECT_ROOT is going to be my actual project folder, polaris-anchor-3. And this is important because we're going to add a .env file, and it's going to contain all of the environment variables that customize parts of the Polaris deployment. So let's go in there. Polaris has a few environment variables that I need to stick in the file. For the network that we're using- we're going to be using testnet- this is the network passphrase for that network, then this is the Horizon + +[19:00] Instance, or the URL of the instance that we'll be using, and then this is our host URL. We're going to be doing this all on localhost, so I'm just going to use localhost:8000. There we go- that's it for environment variables for now. Then we're going to add Polaris's URLs- all the endpoints that Polaris exposes- to our Django application so they're actually exposed. It's going to complain at me right now because it doesn't have some of these functions, so we're going to import `include`, and we're also going to import `polaris.urls`. Cool. I don't know if you're familiar- some of you may be- but we have this admin endpoint that's already there; it comes by default with every Django deployment. On top of that, the Polaris endpoints sit at the root of the domain. So in order to get to the admin page, you would + +[20:00] Go to your domain's /admin, but to hit any of the Polaris endpoints it's straight off the domain root. So Polaris comes with a bunch of URLs. Oh, you know what it is? PyCharm doesn't know where my virtual environment is. I was going to go into the source code and show you what's offered, but Polaris offers a bunch of endpoints- in fact, they're described right here, all these endpoints Polaris provides- and those URLs are going to be exposed at the root of the domain. Okay, now that we have our URLs added to the project, we're actually going to stop here, because this is setup that's going to be required for every installation of Polaris. Now we're going to go through the SEPs that we're actually going to deploy. Polaris can deploy any number of SEPs in + +[21:00] Any combination. So you could run Polaris and just deploy SEP-1, which is the TOML file. You could deploy Polaris and just do SEP-10. You could do Polaris with everything, or with one of them missing, and so forth. We're going to set up Polaris for SEP-1, SEP-10, and SEP-24, and that encompasses everything that's required in order to successfully run a deposit or withdrawal. So let's go to SEP-1. The configuration required is just to add it to our ACTIVE_SEPS list. This ACTIVE_SEPS list in settings is a key setting in Polaris: it signals to Polaris which standards we actually want to run on our server, because it's not going to run them all by default- you may just want one of them. So we're going to add sep-1 to our ACTIVE_SEPS list, and we're actually going to add sep-10 and sep-24 as well, and this is going to tell Polaris which URLs to expose, so + +[22:00] That you don't have, say, a SEP-31 URL when you're not using it.
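+
+(To make the steps above concrete, here's a minimal sketch of the relevant pieces of settings.py and urls.py. The setting names- ACTIVE_SEPS, PROJECT_ROOT- are the ones named in the talk; the exact list contents are inferred from what's described, so treat this as an illustration rather than the speaker's verbatim code.)
+
+```python
+# app/app/settings.py - a partial sketch, not the full generated file
+import os
+
+# BASE_DIR is generated by Django; PROJECT_ROOT is its parent, where the
+# .env file will live
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+PROJECT_ROOT = os.path.dirname(BASE_DIR)
+
+INSTALLED_APPS = [
+    # ...Django's default apps, including django.contrib.staticfiles...
+    "corsheaders",     # sets the CORS policy for the SEP endpoints
+    "rest_framework",  # used by Polaris to build out its API
+    "polaris",
+    "app",             # our own application
+]
+
+MIDDLEWARE = [
+    "corsheaders.middleware.CorsMiddleware",  # must sit above CommonMiddleware
+    "django.middleware.common.CommonMiddleware",
+    # ...the rest of Django's default middleware...
+]
+
+ACTIVE_SEPS = ["sep-1", "sep-10", "sep-24"]
+```
+
+```python
+# app/app/urls.py - exposing Polaris's endpoints at the root of the domain
+from django.contrib import admin
+from django.urls import path, include
+import polaris.urls
+
+urlpatterns = [
+    path("admin/", admin.site.urls),
+    path("", include(polaris.urls)),
+]
+```
+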
Okay, that's actually all that's required in terms of configuration- we'll go into the integrations later. SEP-10 is the same thing, except it requires a few new environment variables. So let's add them. Actually, this documentation is incorrect: it says to add the following variables to your settings file. You don't need to add these two to your settings file- I'm going to update it; these should actually go in your environment config- and if you didn't know that, Django would complain to you about it, so you'd figure it out. But yeah, for SEP-10 we have this thing called a signing seed, which is essentially the server's password for signing the challenge transactions that the client signs as well and the server then authenticates. I'm not going to go too deep into what SEP-10 is, but it's just a way for servers to confirm that a user of their service holds the account they're actually trying to + +[23:00] Deposit to, and this should just be a random keypair that you keep secret. So what I'm going to do is import the Stellar SDK's Keypair class and generate a random keypair, and then I just have the signing seed listed in the environment. Again, this is something you'd want to keep secret- it would not be checked into git, and this obviously won't be deployed- but the code that I'm writing right now will be available on my GitHub, and I'll share a link; maybe we'll have it in the description of the event, that'd be awesome too. So you can reference what I'm doing here or just watch the video back. Okay, and then the + +[24:00] SERVER_JWT_KEY: once the client is authenticated, the server returns a token that's essentially the client's password for hitting the server's endpoints, and you want a secret string that you use to encode the JWT. So we're going to have this super-secret JWT string- you'd obviously want to make this something different. Cool, so now we have everything we need for SEP-10. Let's go to SEP-24. Again, we need the sep-24 string in our ACTIVE_SEPS- we already did that- but one thing we need to do on top of that is configure our static assets. As you saw in that little demo, Polaris comes out of the gate with a UI that's built in and ready for you to use and customize if you want to. It does that by having the staticfiles app installed and having static resources to use for the UI. So we're going to configure the static resources so they work when deployed. + +[25:00] In order to do that, we're going to add whitenoise. Whitenoise is a static-file-serving package; it just makes serving static files more efficient. So we're going to go to settings, go to installed apps- oh, actually, it's a middleware. This is another thing I need to fix; the documentation will be updated by the time I release 1.0 (Polaris is at 0.12 right now, a pre-1.0 release). Anyway, this is supposed to go in the middleware, so let's go to our MIDDLEWARE section. It says it should be near the top of the list for best performance, but still under the CORS middleware. So we're going to put it under the CORS middleware, cool. And we obviously want to make sure + +[26:00] That the staticfiles app is actually in our INSTALLED_APPS list.
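+
+(Back on the SEP-10 piece for a second: a sketch of the keypair-generation step just described. This would be run once, with the output kept out of version control; the .env variable names are the ones the talk mentions, and the JWT string is a stand-in.)
+
+```python
+# One-off script: generate a random keypair for SEP-10 signing
+from stellar_sdk import Keypair
+
+kp = Keypair.random()
+print("SIGNING_SEED =", kp.secret)      # goes in the .env file; keep it secret
+print("public key   =", kp.public_key)  # safe to publish
+
+# The .env file then ends up with entries roughly like:
+#   SIGNING_SEED=S...                    (generated above)
+#   SERVER_JWT_KEY=supersecretjwtstring  (stand-in; use your own random string)
+```
+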
And then, finally, we have some more settings for our settings file, and these all pertain to the staticfiles app. We're going to have a root directory to contain all of our static files. Each app that we've installed- rest_framework, polaris, even the app we're building right now- has static files that it's going to use when it's running, and what Django does is collect them all into one spot instead of having to fetch them from a particular app. So we're going to use a directory called collectstatic- named after the command we're going to use to collect those files- and we're going to have it be within the base directory, so it's just outside our inner app directory. Then there's the URL our static assets are going to be available at in Django; I'm just going to say it's polaris/static. And then we're using this storage component for + +[27:00] Whitenoise's static file serving. This is just one of the many ways whitenoise can store files and return them when they're requested by the client. Okay, and then, in order to collect those static files, we're going to run our first command from manage.py and collect our static assets. Where did my camera go? Okay, there we go. So we're going to run that command- but that didn't work, because we're not in the same directory as the manage.py script; it's within our app directory. Okay, now it gave me an error and said: nope, you're not ready yet, you don't have a SIGNING_SEED environment variable. But we do- so what's the problem here? Oh, I know: our Django project isn't aware of our + +[28:00] Environment, and that needs to be fixed. So we're going to import django-environ. This is a package that comes installed with Polaris. We're going to create an environment object and check whether the path to our environment file exists- remember, I told you we're going to use PROJECT_ROOT to know where to look; we look for a .env file in the project root directory- and if it's there, we read that file. Okay, so let's try this again, and hopefully it knows that we have a signing seed now. "env + +[29:00] is not defined"- you're right, it's because it starts with the environ package. Okay, now it's complaining about the SERVER_JWT_KEY. That's in the same file as the other variables, so I wonder why it's complaining about that but not the SIGNING_SEED. Is it because we have this spacing around the equals sign? Yeah, it was, I guess. Okay, we have one more step that I glossed over- there's one more middleware. Oh right, okay. So this documentation page is for SEP-6 and SEP-24, because they share the majority of the integrations Polaris offers, but + +[30:00] There is some setup that's necessary for SEP-24 specifically. We have this same-site middleware class that comes custom with every Polaris deployment, and this is more of a legacy component.
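+
+(Before the SEP-24-specific middleware discussion continues below, here's a sketch of the static-file settings just covered. The directory and URL values follow what was said in the talk; treat them as examples rather than requirements.)
+
+```python
+# app/app/settings.py (continued) - static file serving via whitenoise
+import os  # BASE_DIR comes from the earlier settings sketch
+
+MIDDLEWARE = [
+    "corsheaders.middleware.CorsMiddleware",
+    "whitenoise.middleware.WhiteNoiseMiddleware",  # near the top, below CORS
+    "django.middleware.common.CommonMiddleware",
+    # ...
+]
+
+# Directory that `python app/manage.py collectstatic` gathers files into
+STATIC_ROOT = os.path.join(BASE_DIR, "collectstatic")
+# URL prefix the static assets are served under
+STATIC_URL = "/polaris/static/"
+# whitenoise's compressed, cache-busting storage backend
+STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
+```
+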
We now suggest that wallets show anchors' interactive flows in pop-ups, but originally we instructed people to open them via iframes, and in order to have sessions within an iframe you need certain HTTP headers- so this middleware class was born to inject those headers into every response. It's required just in case clients open up your interactive flow in an iframe as opposed to a pop-up. So let's add this middleware class, and then we can go ahead. Okay, + +[31:00] Now it's added, and it's supposed to be above the session middleware- or, sorry, below. Oh no, the session middleware should be below the same-site middleware. Okay. And then, finally, we need this form renderer setting, and this is what allows for that UI that we provide; it allows you to override any of those assets. So this is needed if you want to overwrite any static assets or use the default UI, which you're going to want to do if you're running SEP-24. Okay- we're done with configuration. Now, hopefully, this will let me compile my static assets. There it goes, okay. So now we have a new directory called collectstatic, and it holds all the static assets we're going to use within our application. That's great, and + +[32:00] Now that we have everything set up from a configuration standpoint, we are free to go back to our database model section and actually create our asset. Actually- yeah, before we run the service, let's create our database. In order to create our database, we need to run the migrate command, and in order to do that, we need to make sure we have a place for the data to go. So we're going to create a data directory outside of our application, and there's going to be an SQLite file stored in that directory. Polaris also comes out of the box supporting Postgres, and if you want to use MySQL or some other engine, you're free to do so, as long as you configure your database appropriately in the settings file and install the appropriate connector. Polaris comes out of the box with psycopg2, which is a connector for Postgres, but if you + +[33:00] Want to use MySQL or something, install your connector, configure your database, and it should be fine. Okay, now that we're about to configure our database, we're going to do something a little different. Instead of hardcoding this config in DATABASES, we're going to use django-environ's db function, which is something I like to do because it simplifies the configuration. So instead of that dictionary, we're going to have env.db(). Actually, I'm going to look at a project that I ran through prior to building this- because I've done this before- and make sure I'm doing it right. Let me look at the code here... let's just copy that. So we're going to configure our database to use an environment variable to find the DB, and + +[34:00] If that doesn't exist, we fall back to an SQLite file. As you can see, we're using environ's db function and looking for DATABASE_URL in the environment. If it isn't there, the default URL we use is an SQLite path pointing to data/db.sqlite3 within our project root directory.
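+
+(A sketch of the two settings pieces from this stretch of the talk: reading the .env file with django-environ, and pointing DATABASES at a DATABASE_URL variable with the SQLite fallback just described.)
+
+```python
+# app/app/settings.py (continued) - load the .env file, then the database
+import os
+import environ  # django-environ, installed alongside Polaris
+
+env = environ.Env()
+env_file = os.path.join(PROJECT_ROOT, ".env")  # PROJECT_ROOT defined earlier
+if os.path.exists(env_file):
+    env.read_env(env_file)  # makes SIGNING_SEED, SERVER_JWT_KEY, etc. visible
+
+DATABASES = {
+    "default": env.db(
+        "DATABASE_URL",
+        default="sqlite:///" + os.path.join(PROJECT_ROOT, "data", "db.sqlite3"),
+    )
+}
+```
+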
Now that we have our database configured, let's try to create it with `python app/manage.py migrate`. Boom- our database is created and all the migrations have been executed, so our database is in the appropriate state. That is awesome. So again, Polaris is an application, right? It's not just an SDK or a framework- it comes fully implemented with endpoints, database models, static assets; it's a full-fledged app. The difference is that it also provides a framework, an API interface, for you to customize its behavior. + +[35:00] It holds two database tables- asset and transaction- and we're going to create our asset right now. This is the asset that we're going to anchor on our server. So we're going to go into the Python console, load the model for the Asset object, and create an asset in our database. In Python: from polaris we're going to import our model, and we're going to create an Asset object with the code SRT, because that's the code for the asset we're anchoring, and the issuer is going to be the address of the issuing account. When you're setting up your anchor, this is going to be different for you- you're going to be anchoring a different asset with a different code, and you're going to need to actually issue an asset; we have materials and documentation on how to issue assets. But for now, + +[36:00] We're just going to use the SRT asset. So here is the public key for the issuer- and that's all I'm going to do for now; I'm also going to enable sep24 on it. This Asset object also needs the distribution seed. There are two accounts for every asset: an issuing account and a distribution account. The distribution account is the one that's actually going to be receiving and sending payments, and we're going to need to control this account from Polaris, so we need the secret key for that account. I'm not going to show it to you on this screen; I'm going to add it to this asset on another screen that you can't see. So, yeah, just give me a few seconds to add that. And + +[37:00] This distribution seed, by the way, is going to be stored in our database, and this is always something you want to make sure is secure. Whenever seeds are stored in the database, they're encrypted, and then they're decrypted when brought into memory. So I'm going to add this secret key for the distribution account of SRT, and it's going to allow my Polaris deployment to control the distribution account. Cool. Okay, I just added the distribution seed, and now if you look at the asset in the + +[38:00] Database- just look at the distribution account- it generates the account address from the seed that I entered, so that's available as well. Okay, now that we have everything we need in the asset department, we're going to test out our anchor, so let's go ahead and do that. Everything is correctly configured, so let's actually run our service and see what we've got. I'm going to go to manage.py and run the server. Okay, cool. As you can see, Django has a server running at localhost:8000, and if we go there, you can see that we actually don't have anything at the root- and that's okay- but we do have endpoints for SEP-24, we have the authentication server, which is SEP-10, and we have the TOML file.
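+
+(A sketch of the shell session used above, run inside `python app/manage.py shell`. The issuer public key and distribution seed are placeholders- the real distribution seed is a secret that Polaris encrypts at rest, and it should never be committed anywhere.)
+
+```python
+# Inside `python app/manage.py shell`
+from polaris.models import Asset
+
+Asset.objects.create(
+    code="SRT",
+    issuer="GB...SRT_ISSUER_PUBLIC_KEY",  # placeholder: the SRT issuing account
+    distribution_seed="S...PLACEHOLDER",  # placeholder: never commit real seeds
+    sep24_enabled=True,
+)
+```
+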
+ +[39:00] So let's check out our TOML file. This is a very empty TOML file. It has the standard fields you would expect from an anchor, but it's lacking some. Polaris implements the TOML file, but it allows you to customize it and add things to it. So this is the first integration: we're going to tell Polaris that we have additional information to display in the TOML file. We're missing information about the currency- is it anchored? Is it a crypto asset or a fiat asset? What kind of asset is it? Who is the organization behind this server? What's their contact information? This kind of stuff is missing from the TOML, and we're going to add it right now. Okay, let's get out of our settings- we're finally done with the settings, we're done with the environment, the URLs are set up, everything's good there. We're going to create a new file, so let's stop our server process for a second. + +[40:00] We're going to create a new file called app/integrations.py, and this file is going to contain all of our custom code- all the code that augments Polaris is going to be in this file. I'm also going to create an apps.py file. Now, apps.py is a special file that Django looks for in every application in the INSTALLED_APPS list; specifically, it looks for an AppConfig object within apps.py, and it runs any code in the AppConfig class before starting the application. So this is a good place for us to register our integrations: it's only going to run once, and it's going to make sure Polaris has the custom code that we write. So: from django.apps import AppConfig; our config class is + +[41:00] Going to inherit from AppConfig and set the name of our application. This is a Django thing, by the way- if you look at the documentation, Django outlines what this is and why you need to do it. Again, our app is named app, nothing complex about it. Then we're going to have a ready function, and this ready function is going to be run every time we start our application; it does any setup necessary before running our service. What we're going to do is register our integrations here. So: from integrations import- well, we don't have anything to import right now, but we're going to import this TOML function first- and then from polaris.integrations we're going to import the register_integrations function. Now, it's actually not auto-completing, which is kind of annoying. I'm going to see + +[42:00] If I can configure the virtual environment right now so it knows what Django version we're using and actually auto-completes. Sorry, one second here- how are we doing on time? We're a little pushed. Okay, this might go over a little bit; for any of you who have strictly an hour, I apologize, but we're going to be in and around an hour. Hopefully I don't need the autocomplete stuff, but it will help for sure. Okay- yeah, no interpreter; let's use the local one in our environment, set it up. Okay, now we have Django available in our IDE and it actually knows what we're using. So, register_integrations: we're going to register our integration that we haven't even written yet. And + +[43:00] Then, why does it say that it doesn't have any of this? That's concerning. I don't know what's going on with the highlighting right now; I thought I just configured my virtual environment correctly, but maybe I didn't.
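+
+(While the IDE sorts itself out, here's roughly where apps.py ends up- a sketch following the names used in the talk, with paths abbreviated. The `toml` import refers to the integration function written in integrations.py over the next few minutes.)
+
+```python
+# apps.py, in the inner app application - runs once, before the app starts
+from django.apps import AppConfig
+
+
+class MyAppConfig(AppConfig):
+    name = "app"
+
+    def ready(self):
+        from polaris.integrations import register_integrations
+        from .integrations import toml  # defined in integrations.py
+
+        register_integrations(toml=toml)
+
+
+# and in the app's __init__.py, pointing Django at this config class:
+# default_app_config = "app.apps.MyAppConfig"
+```
+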
I'm not going to spend too much more time on this. Yeah, actually, this is not right- this is my system Python. Let's add the environment that I have in my project... there we go, okay. Now we're using the virtual environment; let's give this a second to adjust. There we go, okay. We still don't have this TOML function, but we're going to register our integration. Polaris provides this register_integrations function, and you can look at the documentation on it here- "registering integrations." This is where you add the custom code that you write to Polaris so Polaris + +[44:00] Can use it, and, as you can see, this is the exact same code we're writing right now. We're registering our integrations, and we're only going to register the ones that we have- not all the ones listed here. But let's get to it. This code is only going to run as long as we have this default_app_config variable here. So I'm going to set it to app.apps.MyAppConfig- that's going to tell Django where to look for the code that needs to run before the application starts. Okay, let's go to integrations.py. We're going to just define this TOML function- it's not going to have any arguments- and we are simply going to... yeah, that's all we need, actually. Let me show you what happens when we just have this. So we've registered + +[45:00] The function, but the function has no code. Let's see what happens when we run the service. It broke- and right now Polaris is in debug mode, so you're seeing this nice UI that shows you what's going on in the code. Typically it won't show you that; it'll just give an error page if you're in production. What's going on here is that the integration we provided- the TOML function- isn't returning what Polaris expects, so you're going to want to look at the TOML function that we need to implement. This is the SEP-1 integration we're doing right now: our TOML function needs to return a dictionary containing any of the top-level keys listed right here. So let's actually do that- let's return a dictionary, and let's do the DOCUMENTATION section first. Now, + +[46:00] You can look at SEP-1 to see what fields you can list here. Within DOCUMENTATION, these are the properties you can add. So let's add an ORG_NAME. We have a logo, but I'm not going to get it right now. That's our home domain, and that's all I'm going to provide for the organization- but you get the point; you can add any of these other keys as well. Let's add some point-of-contact information. Polaris doesn't know who's actually building this and who's responsible for it, so that's why we have, in this TOML file, what + +[47:00] They call the point-of-contact, or PRINCIPALS, list. It's a list, right, so we could have a multitude of these. We only have one, though, and that person's name is Jake Urban, and that's all I'm going to provide- I'm not going to provide my email or Keybase or anything like that- but again, you get the idea: this is all information that's going to be injected into our TOML file. Let's add some currency documentation. So let's add a currency. We're obviously anchoring SRT.
Actually, let's just do this: + +[48:00] We only have one asset in our database, so we're just going to get the first one, and then use the code of that asset and the asset's issuer, okay. Then we're also going to add the status- this, again, is a test token; it's not actually real money. SRT is denominated using two decimal places, and we're going to give it a name. Okay, + +[49:00] Let's see where we're at now. As you can see, our service is refreshing every time we change our code, and now we have some more information in our TOML file. So congratulations, we've done our first integration with Polaris. As you can see, it has a PRINCIPALS section now with the name of the principal for this service, it has all the information in the CURRENCIES section that we entered, and it has some documentation about the organization that maintains this service. Cool. So this pattern- writing code, registering that code with Polaris, and then having Polaris use that code- that's the pattern of Polaris. It can do a lot on its own, but when it needs extra information that it can't automate, it requests it from you via integration functions. + +[50:00] Okay, we're doing decently in terms of timing. We have a couple more integrations to implement. Now that we have our TOML, let's see how far we can get in the demo that I showed you earlier. Let's go to our demo site and look at the config. We're not going to interface with the test anchor server on stellar.org anymore; we're going to work with our local machine. We're going to use the same secret key for deposit and withdrawal of our SRT, and we're going to walk through the process. I'm not going to talk through exactly what's happening in the SEP-24 flow... so what happens here? Okay, well, that's good. Let's take a step back for a second- I will actually go through a little of what's going on here. Our client- our wallet- connected with our anchor. It hit our TOML file, because it knows to expect + +[51:00] A TOML file at our home domain. It determined that we have a transfer server and got the information on it. So this is our /info endpoint, and we actually haven't done any work here- Polaris does this completely on its own. Polaris responded to the client with all the information about the asset. As you can see, we're anchoring SRT, it's enabled for deposit, we have minimum and maximum amounts as well as a fixed fee and a percent fee- these are going to be zero for the purposes of this demonstration, but you can change them. And then we actually went through authentication. For SEP-10 authentication you don't need to do anything either: Polaris implements it straight out of the box, and it also implements the deposit endpoint. So once we got authenticated, the wallet made a request to make a deposit, and the anchor responded with an interactive URL, and + +[52:00] The wallet opened up this interactive URL for me, the user, to fill out. So this is the out-of-the-box, don't-do-anything default Polaris view of the interactive flow, and this is what the user would see. I'm just going to say I'm depositing a hundred dollars- there's no fee, so it's just a hundred dollars total. And when I deposit- okay, that's a Polaris thing: if you wait too long on the first screen, it invalidates your session, so you just get a 403. Let's go through this whole thing again so we have a fresh token and get through the flow. So, 100... oh, okay, this is actually a key piece of developing locally. It keeps giving us 403s, and that's because Polaris, by default, expects to be working over HTTPS, and if you aren't on HTTPS it doesn't allow you into the interactive flow. We can turn this off by adding an environment variable called LOCAL_MODE, okay. + +
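+
+(Before restarting with LOCAL_MODE set, here's a recap of the SEP-1 TOML integration assembled over the last few minutes- a sketch, with stand-in organization values where the talk doesn't pin them down. The field names come from SEP-1.)
+
+```python
+# integrations.py - the SEP-1 TOML integration (sketch)
+from polaris.models import Asset
+
+
+def toml():
+    asset = Asset.objects.first()  # the SRT asset created earlier
+    return {
+        "DOCUMENTATION": {
+            "ORG_NAME": "Example Anchor Inc.",       # stand-in values
+            "ORG_URL": "https://example-anchor.com",
+        },
+        "PRINCIPALS": [{"name": "Jake Urban"}],
+        "CURRENCIES": [
+            {
+                "code": asset.code,
+                "issuer": asset.issuer,
+                "status": "test",        # SRT is a testnet-only token
+                "display_decimals": 2,
+                "name": "Stellar Reference Token",
+            }
+        ],
+    }
+```
+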
And when I deposit- okay, that's a pilaris thing. If you wait too long on the first screen, it can authenticate your session. So it's just 403. Let's go through this whole thing again so we have a fresh token and get through the flow. So 100, oh, okay, so this is actually a key piece of development locally. So it keeps on giving us 403, and that's because polaris, by default, expects to be working over https and if you aren't on https, it doesn't allow you to go into the interactive flow. We can turn this off by adding a environment variable called local mode, okay. + +[53:00] So now that we have this process and we're running in local mode, let's see what happens. Okay, let's try that one more time. Let's restart this service. Oh, actually, yeah, I think that's still. That makes sense. The code, the service restarts when the code is adjusted, but not when the environment's adjusted. So let's do that now that we've added our local mode environment variable. Sure, okay, so cool. It successfully, or it accepted our deposit request and it's now the anchor we're building is waiting for me, the user, to send the funds to the anchor. Now, on mainnet, when you're doing this for real, you're going to want to periodically ping your banking connection and see if I have + +[54:00] Sent the deposit that you have, that I have initiated, right, and so again, there's a process in the background that periodically pulls all the pending transactions, all the transactions that we know the anchor knows we can expect payments into our bank account for, and once they're there, once we determine that a user has made a deposit into our bank account off-chain, we're going to then going to deposit the same amount of funds on chain, and so that's how an anchor works with deposit. But, as you can see, nothing's happening right now. We're just continually pulling the anchor and it's still in this pending the user state, and that's because we aren't running a process on our server that checks for pending deposits. So let's get out of this interactive flow here. We're not quite done yet. Okay, let's stop the server. So this is a good point to talk about polaris. + +[55:00] So polaris is a web server, right, it implements the api endpoints defined in step 24, but there's also a variety of other tasks that polaris needs to perform in order to function properly. One of those is checking up on pending deposits that we can expect from users, right, and so in order to actually check up on this, polaris comes default with a pull pending deposits command line tool. So what you would do is you would have the server running and you would also have this pull pending deposits process running as well, and it's going to loop. We could run this once and just check once for pending deposits, or we could loop and run this. I misspelled. It sounds like pull pending deposits. Okay, so complain to me, though, because I haven't implemented the integration. It says: you know, you're not ready to run this command line tool because you haven't implemented pull pending deposits yet, and + +[56:00] You're right. So let's go to integrations. So our first integration was just a function that we pass to polaris, but this second integration is actually a part of a bigger class. So we're going to implement a rails integration class. So rails integration is a class, an integration class that we're going to subclass and implement the functions outlined in rails integration integrations. 
Okay, so we're going to define this poll_pending_deposits function, and it comes with a bunch of type hints that I don't have imported currently. Okay, + +[57:00] And we're actually just going to return a list of all the pending deposits. Polaris is going to call this function periodically from the poll_pending_deposits process that we run, and it expects us to connect to our bank, look to see whether any pending transactions have actually been sent to us, and then return the transaction objects that have been paid back to Polaris, so Polaris can actually submit them to the network. And because we're not actually going to receive any payments on testnet, we're just going to return every pending transaction and mark them, essentially, as ready- this tells Polaris to go ahead, move on, and submit the transaction. So let's run this again. Let's run our server, and now let's run `python + +[58:00] app/manage.py poll_pending_deposits` with the loop option, so it's always running. Okay, it's still giving me problems, because I actually haven't registered this function, right? So now I have my TOML function, but I also have my rails integration class, and I want to register it with the rails keyword of the register_integrations function. Boom- now my poll_pending_deposits code is registered with Polaris, running the poll_pending_deposits process was successful, and I have my server running. So let's go through the process again and see what happens. This is the same as last time: I'm going to say I'm depositing 100, and Polaris is just going to move on- it's going to assume that I sent it. So Polaris is actually executing the transaction now, and it's on the Stellar network; it's + +[59:00] Giving me SRT in the wallet that I used. Boom, there you go- complete. If I were to look at the account I'm using for this wallet right now, I would see that I have at least 100 SRT in it. The Polaris server, using the asset that I'm issuing, deposited funds into my Stellar account after I entered my information. So there you go: that's the deposit endpoint, fully implemented and ready to go. It is 11 o'clock- an hour on the dot- and we have a deposit flow working. Now, there are other integration functions that I need to implement for withdrawals, so at this point I have to decide, in terms of timing: do I want to go further and build out the deposit flow, or do I want to build the withdrawal flow and just not go into more detail on the deposit flow? Because there's a lot more you could do- I think I'm going to do the latter. So I'm not going to do this in the talk, but there's + +[01:00:00] Tons of customization you can do using the integrations Polaris provides for the deposit flow. You can customize the UI, make it a different color, change the forms presented, and add forms. Right now, in this demonstration, we're just asking for the amount- we actually don't collect anything about our users- and again, this is custom to each anchor, right? Polaris doesn't do this automatically; instead, it expects you, as the developer, to provide Django forms that get presented to the user, which you can then process. I'm not going to go through this- we just don't have the time.
But this section of integration functions is called the form integrations, and they're available in the documentation- if you go to the SEP-24 integrations, this form integrations section outlines what I'm not going to do in this talk today, which is providing + +[01:01:00] Forms and UI customization for every piece of information you need to collect on the user. If you need name, email, photo ID, or anything like that, you can ask the user for it through Polaris using these integration functions, as long as you register them. But we're not going to do that; instead, we're going to prioritize getting a withdrawal flow working, and to do that, we're going to run the entire service- not just poll_pending_deposits, but every process it requires- and to do that we're going to use Docker and Docker Compose. I'm just going to copy and paste the docker-compose file and Dockerfile from a project that I've already written. Copy the Dockerfile, copy the docker-compose file, and put them in the anchor we're building right now. So we have these two files now, and if you look at them- again, all the source code for this is going to be on my GitHub- but + +[01:02:00] This is essentially a config file that runs all the processes you need in order to run a deposit-and-withdrawal SEP-24 anchor. As you can see, we run the server, the thing that actually serves the SEP-24 API; we run the poll_pending_deposits process that I showed earlier; and there are also three other processes for withdrawals. I'm going to speed through this, because we're going to do some questions afterwards and I don't want to go too far over. So we're going to build- actually, no, before we build our application, we're going to implement our integration functions for withdrawals. Withdrawals have some integration functions they require on top of what you have for deposits. Specifically, there's this execute_outgoing_transaction integration function- a function Polaris calls expecting you to actually make + +[01:03:00] A payment. It assumes you've received a withdrawal payment from the user on the Stellar network, and you're actually going to send the same amount of funds off-chain to the user via your banking rails. We're not going to do any banking rails in this, right? We're not going to connect to any bank; we're just going to mark the transaction as completed and say that we did submit it to the bank, even though we didn't. This is just for demonstration purposes- when you're actually doing this and writing your integration functions, you will want to connect to your bank, just like in poll_pending_deposits. And we're also going to update the fee for the transaction: right before we execute, we're going to calculate the fee we're going to take from the transaction. So the user sent me 100, and we don't have any fees in this demo- you can charge fees- but for now I'm just going to say zero. So + +[01:04:00] This function is going to be called every time we need to actually send a payment to a user. And then, finally- actually, that's all we need for withdrawals, I think- we're going to turn this into a multi-process application by building... oh man, I need to start my Docker daemon.
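+
+(A sketch of the withdrawal-side method being added to the rails class. A real anchor would call its banking rails where the comment indicates; here, following the talk, the transaction is just marked completed with a zero fee.)
+
+```python
+# integrations.py - executing withdrawals off-chain (sketch)
+from polaris.integrations import RailsIntegration
+from polaris.models import Transaction
+
+
+class MyRailsIntegration(RailsIntegration):
+    # poll_pending_deposits() from the deposit section lives here too
+
+    def execute_outgoing_transaction(self, transaction: Transaction, *args, **kwargs):
+        # A real anchor would send transaction.amount_in minus fees to the
+        # user's bank account via its banking partner here.
+        transaction.amount_fee = 0  # no fee in this demo
+        transaction.status = Transaction.STATUS.completed
+        transaction.save()
+```
+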
That's loading up, so Docker's getting started. Okay, we're five minutes over. I'm going to try to build the rest of the service in the meantime. I'm open to questions- I'll probably just keep working through this and demonstrating the withdrawal flow as I answer them. So, to the host of this event: if you'd like to give me some questions, go ahead, and I'll answer them as we work on this. Okay, + +[01:05:00] So one question is: why use Django? That's a good question. Django has the tools necessary to do what we're trying to do. Flask doesn't have this reusable-app structure, right? A Django project can contain any number of apps that you can plug and play. Flask doesn't have that functionality: you have a Flask application, and the code in the Flask app is what it is. You can install a package and use that package, but you can't just plug in an app. So that ability to plug and play Polaris- which comes with database models and endpoints- is unique to Django, and it made sense for us to do it this way as long as we were using the Python stack. So + +[01:06:00] We have our Docker process up and we're building our containers. So again, this is- oh, what's the problem here? The server service failed to build: the COPY of requirements.txt failed. Oh yeah, we don't have a requirements file. Let's create one: `pip freeze > requirements.txt`. This just puts all our dependencies in a text file, so when we build our Docker image it can install them according to the file. Okay, so we're installing our dependencies right now- this will probably just take a minute- and once that completes, we're going to compile our static assets. I think that's the next step; let's look at this Dockerfile. Yeah: we install system packages, + +[01:07:00] We make a working home directory, we make a data directory for our database, we copy our app code, we copy our environment and requirements files, we install those requirements, and then we compile our static assets. Once we've done all that, we run the runserver command and actually run our application. Okay, so now, if I run `docker-compose up`... hopefully this works. Yay, okay. So now we have all these processes- poll_pending_deposits, watch_transactions, the actual SEP-24 server- all running. Let's go to our demo and look at what the withdrawal flow looks like. Again, I'm going to withdraw 100; there's no fee. For a withdrawal, the wallet actually submits the transaction to Stellar, and the anchor is going to receive that payment. So I'm confirming that I want to send this payment to the anchor. The + +[01:08:00] Wallet is making a Stellar payment to that anchor right now. The anchor service that we built is going to pick up that payment via our watch_transactions process, and watch_transactions is going to mark it as ready for execution. Polaris will then call the execute_outgoing_transaction function that we wrote, and it will mark the transaction as completed. And, as you can see, our transaction has completed, so we have effectively withdrawn funds from our Stellar account, and we now presumably have them off-chain in our bank account. Obviously, we're not collecting the bank details.
Typically, an anchor would say: what's your bank account number? We need to know, so that we can actually send you this money. We're not collecting that here- and again, the documentation is there and Polaris provides that functionality- but we're not going to request that information in this demo. Okay, + +[01:09:00] Cool, awesome. So we're 10 minutes over- not too bad- but yeah, that's how you build an anchor using Polaris. There are a lot of other integrations; Polaris is pretty extensive, and it's in a pre-1.0 release, so there are still breaking changes being made occasionally, but ultimately it will become stable, and a 1.0 release will be coming soon. Okay, cool, I think we're done in terms of demonstration. Now I'm going to open it up for questions. So- that is a good question. There was a question about Node.js and whether or not we're willing to make a version for Node.js, and the answer to that question is: keep asking. We've gotten a handful of requests for it, but + +[01:10:00] The reality is that, at the moment, it's going to be a lot of work for our team to maintain two different versions, so it's not worth it for us yet- but if there is significant demand, then we might deem it worthwhile. So make it known if you want to use Node, and we can discuss it further on GitHub, or via Keybase, or however you want to contact us. I'm trying to think if there's anything else I should mention. So, we did SEP-24 in this case- Polaris implements SEP-24, but it also implements a lot of other SEPs. One that we just added support for is SEP-31. SEP-31 is a SEP, a standard, that allows two anchors to facilitate international remittance payments. The idea is that user + +[01:11:00] A sends money to user B by giving money to one anchor; that anchor sends money to a different anchor; and then the receiving anchor sends the money it received from the sending anchor to the receiving user. So it's this user-anchor-anchor-user flow, and the cool thing about it is that the users don't even need to know they're using Stellar- it's just a payment from one bank account to the next. Polaris provides the integrations necessary to support a SEP-31 receiving anchor. So if somebody wants to send you payments from across the world in a different currency, and you want to use the funds sent to you to then pay your own customers in their home currency, you can do that using Polaris's SEP-31 integrations. Okay, + +[01:12:00] I think that's it for questions. Thank you all for tuning in, and hopefully you learned a lot about Polaris and how it works. If you have any further questions, I'm available on Keybase- that's where a lot of us SDF employees live in terms of chat. All right, thank you for tuning in. Have a good day, guys. + +
diff --git a/meetings/2020-08-14.mdx b/meetings/2020-08-14.mdx new file mode 100644 index 0000000000..3512b70a06 --- /dev/null +++ b/meetings/2020-08-14.mdx @@ -0,0 +1,75 @@ +--- +title: "Deposits and Withdrawals in Vibrant" +description: "An overview of how the Vibrant wallet uses SEP-24, anchor services, and JavaScript SDKs to connect real-world bank accounts to the Stellar network." +authors: [morley-zhi] +tags: [community, SEP-24] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session explains how Vibrant, a Stellar-based savings wallet, enables users to deposit and withdraw real-world currencies through anchors using SEP-24. The talk focuses on user experience first, showing how non-technical users can move funds between bank accounts and the Stellar network without needing to understand blockchain mechanics. + +From a technical perspective, the presentation walks through Vibrant’s React Native architecture and its use of Stellar’s JavaScript Wallet SDK. Morley Zhi details how the app coordinates authentication, deposit initiation, transaction tracking, and notifications while relying on anchors to handle KYC, banking rails, and compliance-specific workflows. + +### Key Topics + +- Vibrant’s mission to protect users from currency instability by providing simple access to tokenized fiat on Stellar. +- The role of anchors in bridging cash, bank accounts, and credit cards into on-chain assets. +- SEP-24 deposit flow from the user’s perspective, including interactive anchor-hosted webviews. +- Use of the JavaScript Wallet SDK’s deposit and withdrawal providers to simplify SEP-24 integration. +- SEP-10 authentication handling via the SDK’s key manager and challenge–response flow. +- Tracking transaction state changes using deposit IDs and long-lived “watch transactions” calls. +- Handling interrupted sessions, cancellations, and stale deposits to keep the UX clean. +- Push notification architecture driven by backend monitoring of on-chain payments. +- Withdrawal flow differences, where users initiate transfers by sending Stellar payments to anchors. +- Practical limitations and future plans around supported currencies, countries, and SDK language support. + +### Resources + +- [Vibrant website](https://vibrant.cash) +- [SEP-24: Interactive Deposit & Withdrawal](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0024.md) +- [SEP-10: Web Authentication](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0010.md) +- [Stellar JavaScript Wallet SDK](https://github.com/stellar/js-stellar-wallets) + +
+ Video Transcript + +[00:00] Hi everyone, my name is Morley Zhi, and welcome back to Stellar's engineering talk series. I lead the app team on Vibrant, and I'm going to talk to you today about how our app handles deposits and withdrawals. At the end of my talk I'll field any questions you have- I think there's a QR code at the bottom of the broadcast, or you can drop questions in the YouTube chat and I'll answer them at the end. First, I'll briefly explain what Vibrant is, in case you haven't heard of it. We just launched this week in the App Store- it's a soft launch for now. It's an app that lets you deposit local currency and convert it to and from a USD token on the Stellar network. Like I said, we soft-launched this week. Our first market is Argentina, because that's where people will see the most + +[01:00] Benefit. Our goal is for people to protect themselves from currency risk, and we're hoping the app is easy enough to use that ordinary people can use it- not just people who are really into cryptocurrency and understand how the Stellar network works. That way, the most people can use the product and realize its benefits. For this app to work toward that goal, people have to be able to take their existing money- which can be in cash or in their bank accounts- and get it onto the Stellar network. The Vibrant app doesn't actually do this step itself; we use third parties, which the Stellar ecosystem calls anchors, to handle this exchange. So first I'll show you what it looks like for a user, and then I'll go into the flow and show you what's happening under the hood. + +[02:00] We're going to go through a testnet version of the app, and we're going to use the Stellar reference token, which is a reference implementation for SEP-24 and has the same deposit API that the actual production anchors use. So when a user wants to add tokens from outside the app, they have a few options. The one we're going to talk about today is getting money from outside the Stellar network into the network- again, that's a cash, credit card, or bank account deposit. Each anchor has their own policies about minimum and maximum deposits, so we have to show those to the user, and we also want to give the user a rough explanation of how long they can expect the deposit to take. The user will see a UI where they can enter their deposit amount and any know-your-customer + +[03:00] Information that the anchor requires, and then the user will see instructions about how to complete the deposit. If it's a cash transaction, they'll see how to deposit the cash; if it's a bank transfer, they'll see instructions for what to enter into their bank; if they're depositing with a credit card, they'll see a credit card form. Basically, it tells the user what to do next. If the user has the app open when they're finished with their part and it's the anchor's turn to do the processing, they'll see a confirmation screen, and when the actual deposit resolves- much later- if the app is open, they'll see a notification in the app; if the app is not open, they should see a push notification. So I'm going to go through the exact same flow and tell you the precise API calls + +[04:00] And, specifically, the SDK calls that we use to make this happen.
So our app is written in React Native, and we use the JavaScript Wallet SDK that Stellar maintains to handle the interactions with the anchors. The Wallet SDK contains several tools that make it easier for wallets to interact with the Stellar network, and one of the most important ones we use is the deposit provider. There's also a corresponding withdrawal provider, and these just provide a really easy-to-use API for SEP-24 and some common usage patterns you'll need. So, firstly, we want Vibrant to be as easy to use as possible for users. That means we want to pick tokens where we know the anchor that's issuing them, we understand that their business practices are + +[05:00] What we need, that they're legally sound in their country, and that their user interface is clear and understandable. So for now we have a hard-coded list of the currencies that we support. Each time we call out to an anchor, we need to authorize that call and let the anchor know that the person making the call has ownership of the Stellar account they want to deposit to or withdraw from. The way we do that is by requesting a SEP-10 auth token from each anchor. Now, the actual Stellar key in the app is secured with the Wallet SDK's key manager class. The key manager class also has a function called fetchAuthToken, and that handles the full exchange and signing of the challenge transaction- it basically handles that handoff. So all the app does is call fetchAuthToken + +[06:00] On the key manager, and then we also call the deposit provider's setAuthToken so that subsequent requests using the deposit provider are authorized with that token. Now, there's an implementation detail here: right now we've chosen not to worry about timeouts, and we just fetch the auth token before every call. In the future we might have smarter timeout handling, because each SEP-10 call is two round trips, and that can get expensive, especially on mobile where connections aren't as good. But for now we don't want to worry about timeouts, so we just get an auth token before every API call. On this screen we need to show users roughly how long a transaction will take to process, plus the minimums and maximums. The deposit provider has a function, getAssetInfo, that fetches these minimums and maximums. It does not have- or + +[07:00] It has inconsistent support for- processing time, so for now those are hard-coded. On this screen, when a user taps the continue button, we use the startDeposit function on the deposit provider to initiate the deposit, and this request includes some information that makes the deposit a little smoother. We send the anchor the wallet name and the URL of the wallet, so that in some cases the anchor can customize their UI to be closer to the wallet's. We also include some information that pre-fills some know-your-customer fields- right now that's email, phone number, and name. That's basically all the information Vibrant collects from the user- again, all in the goal of making things as easy as possible for the user. The anchor will respond to the startDeposit request with two things. One is a URL, and the + +[08:00] Other is an ID for the deposit. We're going to use that ID a little bit later.
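+
+(A quick aside on the auth step described above: fetchAuthToken wraps SEP-10's challenge-response exchange. To keep this post's code samples in one language, here's a rough Python sketch of that same exchange using the stellar_sdk and requests packages- the auth URL is hypothetical, and a real wallet would use the SDK rather than hand-rolling this.)
+
+```python
+# What a "fetch auth token" call does under the hood, sketched in Python
+import requests
+from stellar_sdk import Keypair, TransactionEnvelope
+
+AUTH_SERVER = "https://testanchor.example.com/auth"  # hypothetical URL
+NETWORK_PASSPHRASE = "Test SDF Network ; September 2015"
+
+
+def fetch_auth_token(keypair: Keypair) -> str:
+    # 1. Ask the anchor for a challenge transaction for this account
+    challenge_xdr = requests.get(
+        AUTH_SERVER, params={"account": keypair.public_key}
+    ).json()["transaction"]
+    # 2. Sign the challenge with the account's key
+    envelope = TransactionEnvelope.from_xdr(challenge_xdr, NETWORK_PASSPHRASE)
+    envelope.sign(keypair)
+    # 3. Send it back; the anchor verifies and returns a JWT for later calls
+    resp = requests.post(AUTH_SERVER, json={"transaction": envelope.to_xdr()})
+    return resp.json()["token"]
+```
+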
For now, we use a webview to load the URL, and this loads a web user interface that the anchor manages. They're in charge of everything that occurs in that webview: collecting KYC info and the amount, generating those deposit instructions later, basically anything the anchor needs to be able to do this deposit for the user. Since a webview is a black box, we can't really tell when the user is done and when the anchor has to take over, but the app needs to react to that information. We don't just want to sit on a page; we want to know when to move on from it. To do that, we need to ask the anchor for all that information.

[09:00] In order to do this, we use the deposit ID that the anchor gave us earlier. Firstly, once we have the deposit ID, we save it to our servers, so that if the user closes the app and then opens it again later, we can resume the session. Then we use a function in the DepositProvider called watchAllTransactions. It basically keeps a channel open to the anchor and fires a callback whenever a transaction changes status. The important field we're looking for in the object we get back from the anchor is the status field; that lets us know where the transaction is in its workflow. Usually anchors only start returning this object after the user finishes the form;

[10:00] sometimes it happens slightly before or after. The status will usually be incomplete if the user is still filling out details or the anchor still needs to collect information. Other statuses it could be are pending_user or pending_user_transfer_start. Whenever the status describes a state where the user still has to fill out information, we continue to show this screen, and again, if you close and reopen the app, we use the deposit ID to resume the deposit; this object also has a more_info_url that can resume it. Now, when a user is in this state, it's possible they change their mind and don't actually want to go through with the deposit anymore.

[11:00] Maybe they just see a reason they can't do the deposit. So when a user is in this state, we show a cancel-pending-transaction button at the bottom of the screen, and behind the scenes all that does is remove the deposit ID from our servers. We no longer keep watch of that particular deposit ID, and it lets the user create a new deposit intent if they want to. We also do things like forget deposits that are older than an hour, just to keep the UI clean. If we get a transaction status back from the anchor that indicates the user is waiting for something the anchor has to do — which is basically pending_anchor, pending_external, pending_trust, or pending_stellar — that's when we show the processing confirmation screen.
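Here is a sketch of how a wallet might react to those status updates, again assuming the watchAllTransactions API described in the talk. The SEP-24 status names are real; the UI helpers and the watcher's return shape are hypothetical stand-ins.

```js
// Hypothetical UI helpers, stubbed for illustration.
const showAnchorWebview = (url) => console.log("open webview:", url);
const showProcessingScreen = () => console.log("show processing screen");
const notifyUser = (message) => console.log("notify:", message);

// Assumes `depositProvider` is authorized as in the previous sketch.
const watcher = depositProvider.watchAllTransactions({
  asset_code: "USDX",
  onMessage: (transaction) => {
    switch (transaction.status) {
      case "incomplete":
      case "pending_user":
      case "pending_user_transfer_start":
        // The user still owes the anchor information or an off-chain
        // transfer; more_info_url can resume the anchor's UI after a
        // restart.
        showAnchorWebview(transaction.more_info_url);
        break;
      case "pending_anchor":
      case "pending_external":
      case "pending_trust":
      case "pending_stellar":
        // The anchor is doing its part: show the processing screen.
        showProcessingScreen();
        break;
      case "completed":
        notifyUser("Your deposit has arrived.");
        break;
    }
  },
  onError: (error) => console.error("watcher error", error),
});

// The SDK returns a handle for cleanup (exact shape may vary by version),
// used when the screen unmounts or the user cancels the deposit:
// watcher.stop();
```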
[12:00] We also have a backend service — this is how we implement the push notifications. It watches every transaction on the Stellar network. Whenever a payment transaction comes in, it checks to see if the recipient is a Vibrant user, and if the recipient is a Vibrant user and the sender is an anchor, then that backend service calls out to Google or to Apple to send a push notification. Withdrawals work basically the same way. The only difference is that after this request, the app will make a Stellar payment to the anchor to kick off the first step. So whereas during a deposit we're waiting for the user to send funds to the anchor via their bank or credit card, during a withdrawal the

[13:00] user is sending funds to the anchor via the Stellar network, and this is just an ordinary Stellar payment. So that's pretty much it. There's not a lot going on; it was just complicated to figure out the right steps to get there. So I'm happy to answer any questions as they come in. Okay, the first question is: in which countries is Vibrant eligible to use? I haven't looked at the actual App Store distribution list lately; I think it's only released in Argentina, but I could be wrong, it might be worldwide. Right now we only support USDX, which is anchored by AnchorUSD, and

[14:00] ARST, which is anchored by Stablex. We're planning to support further tokens and further geographies, but for now our efforts in terms of marketing and social media and, you know, getting people to install the app are focused on Argentina. But I think you can install the app no matter where you are. Someone asked: can a payment be reversed after a user inputs an address in error? I think it's possible, but it'll happen the way any bank transaction issue does, which is on the anchor side. If the user enters the wrong credit card number or something, the anchor should have all the information to resolve that. They have the user's contact information,

[15:00] they have all their details. It's really the decentralized ethos that Stellar has created with this anchor system: users interface with the anchor to deal with any transaction issues. Someone else asked: can the user send money directly to another user's bank account? I think this feature is planned for the future, but right now in Vibrant all person-to-person transactions use the Stellar network via Stellar payments. So each party will have either gotten funds from someone else or done a deposit like I showed you to have money in the app.

[16:00] Someone asked: are these libraries for using SEP-24 available for Golang? Right now the wallet SDK is written in JavaScript, and there isn't any project to port it to other languages. I don't intimately know the state of the Go SDK; it may have some of this. But the wallet SDK is just an interface that cleans up all these API calls — at heart they're just HTTP requests. So even though we don't have a wallet SDK for Go, it's still possible to reproduce all this functionality using normal HTTP requests.

[17:00] I think some new questions are coming in that they're sorting out. Okay, looks like that's it, so thanks everyone for listening to my talk. If you have any questions, you can reach me at morley@vibrant.cash. Otherwise, thanks a lot for listening and see you next time.
diff --git a/meetings/2020-08-28.mdx b/meetings/2020-08-28.mdx new file mode 100644 index 0000000000..ac47717b7d --- /dev/null +++ b/meetings/2020-08-28.mdx @@ -0,0 +1,136 @@ +--- +title: "Decentralized Community Funding" +description: "An overview of SCF 2.0, including Lab and Seed funds, quadratic voting, a panel-of-judges finalist process, and updated submission workflows." +authors: [kalepail, kolten-bergeron] +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session introduces the redesigned Stellar Community Fund and explains why the program evolved after multiple rounds of community feedback. The update separates funding into two tracks so that smaller experimental efforts and larger, long-lived projects can each receive support that better matches their scope and needs. + +The discussion also covers changes to how projects are selected and funded. A panel of experienced community members reviews submissions and advances finalists, after which the wider community allocates funding using quadratic voting. This structure is intended to better reflect voter sentiment, reduce fatigue, and improve fairness as participation scales. + +### Key Topics + +- The mandate behind the Stellar Community Fund and its evolution from earlier grant programs. +- Differences between the Lab Fund and Seed Fund, including cadence and funding size. +- Why a panel-of-judges stage helps improve finalist quality and scale community voting. +- How quadratic voting captures intensity of preference rather than simple up-or-down votes. +- The use of flagging to help surface potential anomalies for reviewer attention. +- An overview of the new submission and voting experience. + +### Resources + +- [Learn more about the SCF](https://communityfund.stellar.org) and how you can submit your project built on Stellar. + +
+ Video Transcript

[00:00] Does that mean we're actually live? It's likely. Okay, would you like to take it away for us? No, dude, you go ahead and intro. All right, welcome

[01:00] everyone. It is fantastic to be live once again with my good buddy Kolten. We're going to be talking about Stellar community funding. We've talked about this in the past, but that was looking forward to a future date when it would all actually go live, and lo and behold, that day has finally arrived. After many months of planning and building, the ecosystem team is finally ready to release SCF 2.0, and that's what we're going to be talking about today. This is an engineering talk, so we will get into the weeds a little bit on some of the technical decisions and implementations in this new version of the Stellar Community Fund. But it's also going to be a bit longer than normal — engineering talks normally run for like 15 or 20 minutes, and this will probably go a little longer to make sure we do justice to all the updates and have all

[02:00] the information available to those who are interested in this update to the Stellar Community Fund. So I've got my fancy slide deck open here and we're just going to start walking straight through it. I think it's worth taking a little bit of a look back as well as a look ahead, to see where we've come from and to understand a little bit better why, as well as where we're going. To inform our discussion here, let's look at our mandate. Stellar has a mandate and the SDF is aiming to follow it. This is something we make public, and if you just type "Stellar mandate" into Google, you can find this particular quote buried in there with all the other missions and visions and mandates that we follow at the SDF when building Stellar and the Stellar Community Fund. One of our allocations of lumens is dedicated towards the Stellar Community Fund, this community funding initiative, and it gives the broader Stellar community the opportunity to vote on which projects deserve XLM

[03:00] grants. This is a very unique fund in that it is the only one that really gives the greater community a direct opportunity to impact the allocation of XLM grants. The SDF has a pool of lumens that it has mandated to give out in certain scenarios and to certain entities to accomplish specific goals and tasks, and one of those is this pool of 12 million lumens a year that we give out via the community, so the community can take part in allocating them. And that's what we're doing with this big update: continuing to iterate on what we've done in the past to align with this goal, this mandate. We've been close to it, but as the ecosystem matures, we need to continuously update and make sure that we're aligned with our mandate. So, a little bit of history. The Stellar Community Fund — I would

[04:00] lump it together with the Stellar Build Challenge. Way back in June 2016, the SDF launched a program called the Stellar Build Challenge. It had different categories with first, second, and third places, and first place was going to win 55 million lumens. The price of lumens back at that time was actually less than a penny, so we've come a long way since June 2016.
Entries in that very first Stellar Build Challenge included GalacticTalk and LOBSTR, amongst others — two projects that, at least I know I, use just about every single day. Then we move all the way through September 2018 to the last SBC; the Stellar Build Challenge was shut down at number seven. Shortly after that, Zach — who hopefully a lot of you in this community know, really the hero of the SBC — took over ownership of it, and then,

[05:00] as the SBC went out, he brought in what we call the Stellar Community Fund, which was really starting to align more closely with the community vision of the mandate: having the community giving away these lumens, or allocating where those 12 million lumens should go. That was carried out by Zach; he built it and designed it. We've been running that for a little over a year now, five different rounds, and now we find ourselves here, August 2020, with SCF 2.0. These different SBCs and SCFs were all good and they all worked well in their time. There may have been things that could have been improved or changed, and that's right: as things can be better, we make efforts to improve them, and that's really what SCF 2.0 is trying to do. It's taking maybe a little bit bigger stab at an improvement, something akin to what Zach did when he

[06:00] initially created or invented the original SCF. We're taking a big step here. Rather than just adjusting some rules, we're actually restructuring it in a pretty significant way, taking the best of both the SBC and the SCF into a new fund which we hope will provide a fantastic funding mechanism for a long time yet. Before you keep going, I do think it's worth highlighting the very first round of SCF. Do you remember how voting took place? I do, yes. In the very original round of SCF, we did a vote in a Reddit thread where people just typed in their preferences, their votes, and we had to hand-count them. Zach hand-counted them and I hand-counted them, and then a community member came up with a script to help count them. So we were manually counting comments in a Reddit thread. Even SCF 1.0 has come a long way since the very first iteration. Yeah, that's exactly right. We're always

[07:00] trying to iterate, and a lot of it, again, is not that something was bad and we're trying to make it good. It's that as the ecosystem matures, we have more members, more participants, and interest grows; necessarily, the fund itself needs to change to better suit and host the kind of growth that we're seeing. A little bit of a fun fact: the SBC and SCF is actually where I came from. My intro to Stellar came from the SBC with entries like Popcorn, Colorglyph, and StellarQL, and then in the SCF I had StellarAuth. So one of the big focuses for me, as I've thought about the redesign of the SCF and where we can actually make important and meaningful changes, is the funding. Obviously a big focus is the funding, the actual money that's handed out, but Stellar community funding is as much about people and relationships as it is about giving money away. If you

[08:00] talk to winners or participants who really took the SCF or SBCs seriously, a large majority will say that the relationships and feedback they received were actually more valuable than the money.
The money is obviously helpful, and that's why you're here, but a huge part of participating in this is also the relationships and the actual community aspect. So sure, it's funding, yes, it's Stellar, but the community aspect is more than just "the world can vote" — it's also "the world can participate." Anybody who's interested in Stellar, anybody who wants to talk about fintech and finance and moving money around and transferring value, anybody this is interesting to — they can participate and talk and give feedback and find projects that are interesting to them and find their place to contribute. Whether that's competing — actually submitting entries, whether events or documentation or actual

[09:00] projects — or just combing through entries and seeing which ones you feel are valuable and interesting and should actually receive funding. It's a community fund, and being able to focus a bit more on that aspect was really important to me in the redesign of the SCF. Yes, the funds are great and we need to do the best job possible of allocating them appropriately, but the community is a huge part of this, so we're making sure people are still at the center. If I were to summarize the driving principle behind the redesign, for me it's the right resources in the right hands at the right time. Resources being funds as well as feedback. Right hands being making sure that we're getting these to actual projects — making sure the filtering process by which entries come in and then get funding makes sense, that nobody's being overworked in the review process, that there's little angst and struggle.

[10:00] I mean, it is a competition, so you're going to have some of that, but we're cutting down on the unnecessary pieces. And then, at the right time: making sure our funds reflect an amount of money appropriate for the time the project has spent and the amount of effort that's gone in. This, to me, is really the driving force behind the theme of the redesign. For those who aren't familiar with the SCF: currently it's a single fund that happens four times a year, where three million lumens are given out, split between eight different projects. In the new SCF 2.0 there are two funds: a Lab Fund and a Seed Fund. The Lab Fund is for the smaller, experimental, open-source-focused things — documentation, events, the idea of SEO and market presence, getting the word out there, as well as real-world stress tests, so building things that maybe aren't necessarily perfect for the Stellar use

[11:00] case as it stands but might help stress test the network. That's our Lab Fund, and that is 500,000 lumens given out four times per year, split between about 12 different projects. And then we have the Seed Fund, which is for bigger projects. This is to help viable, innovative, first-mover businesses and utilities get started on Stellar. These are projects that we hope will still be around for years. The prize pool for that is five million lumens split between three to five projects, and the Seed Fund will happen twice a year. By splitting these funds — I mean, what we see right now in the SCF is these two different kinds of entries, larger projects as well as hackathon-type projects, and both are very valuable to the ecosystem. Both can generate a lot of excitement. The problem is, right now they both receive about the same amount of funding.
Whereas a Lab Fund project, a hackathon project — it's worth doing, it's worth competing, for

[12:00] a much lower prize, so you'd still be willing to participate; whereas in the Seed Fund, if you don't get enough funding, it's really hard to keep that project alive and keep it going. By splitting this up into two very specific and defined funds, hopefully you'll know better which one your project fits into, and then you receive the funding that's more appropriate for the kind of project you have. I will say that this necessarily means we also have to introduce a brand new component — really two components, though maybe one's more of an update. We need to introduce the idea of a panel of judges, as well as an update to our voting mechanism. We technically have a panel of judges right now, but it's the whole wide world, the whole community. So let's say 20 entries come in — how do we narrow that down to a final list? Or say 300 entries come in — how do we narrow that down

[13:00] to some sort of manageable list so that a final voting round can happen, where a reasonable amount of funds can be allocated and there's not a lot of voter fatigue from having to comb through 300 different entries? The problem right now is that the same community, technically, votes in both of those rounds. You're voting once, picking all the projects, combing through lots of entries, and then you have to do the whole thing over again in the final round: you narrow down, and then you have a final vote to allocate funds. In the new SCF 2.0 we're splitting that. There will be a panel of judges, a very select group of community members who have dedicated a lot of time and effort and energy into Stellar and building on Stellar — maybe they've won SBCs or SCFs in the past, or they might be part of the infrastructure grants or the Enterprise Fund, people who are really core to the Stellar ecosystem — as well as a few SDF

[14:00] employees like myself, Kolten, Zach, Anka, and others. The role of the panel of judges will be to filter through all of the entries that come in for either the Seed or the Lab Fund and narrow them down to a final list that they feel is of the highest quality. Those finalists will then make it into the community voting round, which is this flaggable quadratic voting. With this we're really trying to add sentiment to the community vote. Right now it's very static: when you have a vote, you can say "I like this project and this project," but there's no intensity. You can't say "I really like this project," or "I kind of like this project," or even "I really don't like this project." All of those sentiments are quite important to ensuring that the voting power most closely represents the actual sentiment of the community participating in that voting round, and

[15:00] we're hoping that through this flaggable quadratic voting we can achieve that. This is the more technical aspect of this talk, and Kolten is actually the one who sold me on the idea of quadratic voting, so I'm going to hand it over to him for a little bit.
I'm going to bring up a screen here with a demo of quadratic voting and then let him explain where we stumbled upon quadratic voting, how we've implemented it ourselves, and why we think it's worth trying. Yeah, so we came across quadratic voting when looking at how the Ethereum ecosystem handles funding — in particular, a project on Ethereum called Gitcoin. They have a funding mechanism called quadratic funding, which is another flavor of quadratic voting, where basically they allow a group of people to make donations to a project. You take the square root

[16:00] of each donation, add them all together, and then you square that, and you get a matching number for that pool of donations. So we borrowed from that idea and took the original iteration of quadratic funding, which is ultimately quadratic voting. And, like Tyler was saying, quadratic voting allows you to express intensity of preference in a vote. In a traditional voting system, where it's just one person, one vote, you have this problem where, when you're voting for multiple projects, you can't express how positively or negatively you feel about each one. And we thought, you know what, since we're doing this community vote, we want experts in the community to be able to double down on projects they really like. We want people who are avid users of a project to double down on it. But we also want people to be able to give valid opinions about projects they don't like, so if they think something is overperforming,

[17:00] they can also downvote that project, which I think Tyler will show in this demo. So I'll let you show how it works — it's much more intuitive once you start clicking around and get a feel for how the voting mechanism works. But we have a better explainer on the website, and we also have some resources we can provide for anybody who wants to learn more about it. Yeah, absolutely. And I think it's worth mentioning that this is an experiment in a sense — Kolten reminds me every single time that we're trying this. I think it's going to be fantastic; we'll see. That's the beauty of the SCF: we get to iterate and make improvements and really see what works and what doesn't. To go through a demo here: ultimately there's a prize pool, an amount of money that's being allocated by percentage of the vote. As votes are given for or against a project, its percentage compared to other projects changes, and that necessarily means the amount of prize money

[18:00] it's winning changes. And this is something we really wanted to highlight in the new voting design — which, by the way, this is a demo, this isn't actually what the voting site looks like, but it's a good, very clean demo, and I hope it will let you play around with quadratic voting. Let's say we really like this "unbranded wooden soap" project and we would like to give it some votes, to say "I like this project." If you give it a single vote, you'll see right here that by adding a single vote you're going to give it $2,769, and that is going to increase this number by that $2,769.
To get to this number you've added $2,769, but that $2,769 has to come from somewhere, and it came from all these other projects — $227 from here, 30 bucks and two pennies from this project. That's how you can upvote and downvote; you're sort of taking money from other projects and allocating it here. But it's important

[19:00] to understand what's happening as you start to add votes to a project. You start with a set number of credits, and as you vote, those credits are burned. The quadratic scaling happens as you upvote or downvote a particular project multiple times. If I click this again, it's going to be two votes, but two times two is four, so it's going to burn four credits. We've voted twice but burned four credits. So as you click on a project more — say we've voted five times, but five times five is 25 — we've burned 25 credits voting for this project five times. If we start voting for other projects — let's say I also really like these "refined cotton shoes"; I'm a big cotton shoes fan myself — we don't want the first project to lose its $2,776, so we need to get that back up. So we're going to add a little bit of money back to that project so it comes back up, and we've added $661.24. Now,

[20:00] it's still coming from the same pool; it's just reallocating from all these other projects, taking money from them and putting it back here. You can, in real time, see how your votes directly affect the outcome as it stands while you're voting, with these credits you can burn. But instead of it being static — five and two — it scales quadratically, so that you can't overpower the vote in a particular direction, and you allow that sentiment to translate much more closely to the way people would actually vote if you were to poll the community. Another thing that's really important: again, this is standard quadratic voting, but you might start to see a problem here, and this starts to get into a little bit of the issues you find when you're giving away money in public, which is manipulation. There's a lot you can do when you're registering accounts, like

[21:00] making sure the accounts that are voting use a system that's not free to create. In our system, we're using SMS verification, so you have to have a phone that can receive SMS messages to be able to vote. In the past we've used Keybase, which gives you some ability to see who's voting and make a decision on whether you believe them to be a legitimate individual human being who doesn't have a lot of copies of themselves in the database. But manipulation out in the public is quite difficult to deal with. In the SBC, we just had a panel of judges who would pick the projects, and we knew who they were. But when you open it up and say, let's do a real community vote, it's great — until the word starts to get out and people see, wow, we can actually win a lot of money, and the manipulation becomes much more worth it. And if you don't add a layer of friction, that manipulation will just start to scale.
And that's really what we've seen in the past, particularly in two SCFs:

[22:00] four and five, and really strongly in five. You can go back and read our post-mortem on SCF 5, where voter manipulation just blew out of control. And we knew this was going to be the case, because we were using Keybase and it's free to create a Keybase account. Once people start to get the word — "I'll just create a free Keybase account and start using that to vote" — you have to find ways to counteract that. So, circling all the way back to quadratic voting: it's not enough to just allow sentiment changes. You also need the ability to flag things. If a particular project is way over- or under-performing what you believe it should — let's say this "refined granite chair" project down here is just taking off, but their website doesn't even work. Why is this project doing so well? You might be tempted to use all of your credits — 10 votes, times 10, is 100 credits to burn — to try to get them down, because you believe they

[23:00] shouldn't get this much; they don't have anything. But the problem is, by downvoting them 10 times, you haven't really affected the vote much at all. You've just spent everything trying to counteract the bad acting you believe they're doing. A better alternative is this idea of flaggable quadratic voting, where, rather than using all your credits to counteract a bad actor, you just attach a sentiment: "I feel like this is underperforming, like somebody's attacking this project and downvoting it when they shouldn't." Then I use 16 credits, in this case, to add a flag that basically says: hey, moderators, can you please take a look at this? Something's fishy. You can also go in the other direction and say: this is way overperforming, somebody needs to take a look, the panel needs to take another look, something's fishy here. But now you've only burned a few credits to make that flag, and you still have, in this case, 83 left to go about your normal business of upvoting and downvoting other projects as you please. So, rather than having to spend

[24:00] all of your credits counteracting bad actors, you can just flag it, the panel will take a look, and you don't have to worry about it anymore — you can carry on with your vote. I think that's really important in the iteration process of figuring out the best way to run an open community fund where you're giving away a relatively large amount of money. Manipulation is going to happen; it's just a question of how we counteract it and how we allow the community to take part in fighting against it, so that the fund has the lowest friction but is also incredibly fair and well run. As time goes on, we'll see how this works and make changes as necessary. Yeah, and we have a couple of questions coming in about this. One of them is: can you recast votes? The answer is yes, up until you submit your final vote. You'll be able to dynamically adjust your votes however you want throughout the system. So say you upvoted one or two projects and you decide to change your mind

[25:00] before you submit — you'll be able to do that. It's not set in stone until you click submit. That's a really good question.
It's something I've gone back and forth on — could you update votes after submitting? There are two problems I see with that. One, if you're able to update votes, essentially you would make a change, and then that change is visible to anybody else who's also voting. In the case of a bad actor, you could create a bunch of fake accounts, really skew the vote, and then reactively go back and change all of it — remove all your votes or update them in a particular direction — and now it's not all that helpful. So I don't think being able to update your votes is all that necessary; I think it would probably do more damage than good. The issue then, obviously, is: well, if I'm allocating my funds but then the project changes significantly, or it gets a lot more votes than it had when I voted, then maybe I should just wait to vote until the very last minute. We'll have to see, because I don't really want people to wait until the very last minute to vote.

[26:00] But I don't think, in general, that's going to happen. It's just something we'll have to evaluate, and I feel like the way we've done it now is probably the best way to start. Then we just have to see what happens through the first Lab and Seed Funds, and if adjustments are needed — either on updates, or the frequency at which you can make updates, or, I don't know, there are different parameters you can add that might help with the pros and cons of either of those options. Yeah, I don't think retroactively going back and changing a vote after you submit will ever really be a viable option, but you'll be able to do it before you submit. The other question: are there any features built in to prevent flag spamming? Not in the UI or anything like that, no, there's nothing to prevent it. Ultimately, if people flag-spam, that doesn't guarantee a project will be disqualified or anything like that. It's more of just a "hey, you really need to take a look at this." So if one project gets one flag, it probably won't get looked at, but if it gets 20 flags, it'll probably end up getting looked at. And if somebody is just flag spamming

[27:00] and there's no obvious manipulation taking place, projects just won't be disqualified or anything like that. Yeah, it's a really good question, and I think a good answer is that flags don't actually affect the prize allocation. It's just a flag. You have to spend credits to flag things, so you can't flag every project — you can only flag up to, I think, one less than half, or maybe one more than half, of the projects. But flags don't count as a positive or a negative when it comes to the final prize allocation. It's just "hey, take a look at this." A vote will count towards a positive, so you can add more positivity if you want — like, "if this doesn't get improved, I still want to make sure they get something" — you could do that, but at the end of the day the flag doesn't do anything to change the vote. Nobody else is going to see which projects have been flagged. It's just a note to the panel of judges for that round that something might be fishy and somebody needs to take a look at this

[28:00] entry. All right, let me double check, make sure I did not miss anything.
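As an aside, the credit arithmetic described above is easy to restate in code. This toy sketch assumes the numbers from the demo — a 100-credit budget, n votes on one project costing n² credits, a 16-credit flag — plus one plausible reading of the prize allocation (proportional to each project's share of positive votes); the real voting app's rules may differ.

```js
const TOTAL_CREDITS = 100;
const FLAG_COST = 16; // the 16-credit flag from the demo

// ballot.votes maps project -> vote count (negative for downvotes);
// ballot.flags lists flagged projects, each costing a fixed amount.
function creditsSpent(ballot) {
  const voteCost = Object.values(ballot.votes).reduce(
    (sum, votes) => sum + votes * votes,
    0,
  );
  return voteCost + ballot.flags.length * FLAG_COST;
}

// One plausible allocation rule: split the pool by share of positive votes.
// Flags are deliberately excluded — they alert the panel, not the payout.
function allocate(prizePool, votes) {
  const positive = Object.entries(votes).filter(([, v]) => v > 0);
  const total = positive.reduce((sum, [, v]) => sum + v, 0);
  return Object.fromEntries(
    positive.map(([project, v]) => [project, (prizePool * v) / total]),
  );
}

const ballot = {
  votes: { "wooden-soap": 5, "cotton-shoes": 2, "granite-chair": -1 },
  flags: ["granite-chair"],
};

console.log(creditsSpent(ballot)); // 25 + 4 + 1 + 16 = 46
console.log(TOTAL_CREDITS - creditsSpent(ballot)); // 54 credits left to spend
console.log(allocate(10000, ballot.votes));
// wooden-soap ≈ 7142.86, cotton-shoes ≈ 2857.14; granite-chair gets nothing
```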
Okay, I think we're good on the specifics of quadratic voting. The other thing — did we touch on why we think a linear distribution is not ideal? We did not. Kolten, would you like to wax eloquent? Sure, let's talk about linear distributions. One of the big problems — and we call it a problem, but it's easy to say this in hindsight — that we identified with the current model of the SCF is that the distribution of funds is really linear. If you look at it on a graph, it looks something like that. The big problem with that is that the complexity of any given project in a round can be very different. A project that gets second place in one round could be a full-on business, and the project that gets second place in another round could be a hackathon project. And if you have this linear distribution of funds, those two projects got the same amount of money

[29:00] just based on which round they submitted in. That isn't ideal, obviously, because those projects are of varying quality and need different amounts of money to scale up properly. The reason we considered quadratic voting is that the community can decide: okay, look, this project needs way more money than the project next to it in the competition, and so we can allocate our votes proportionately and get it more money. In the current voting system you just literally can't do that. So we wanted it to be more dynamic — somebody just asked another question, sorry, that distracted me — anyway, we wanted the allocation of funds to be more dynamic, to fit the needs of the projects that were submitted, and to allow the community to fit those needs with their vote as well. Yeah, absolutely. And when you don't have a quadratic system or a panel of judges, you have to be really careful with how you allow votes to be allocated, because it would be really easy for

[30:00] the wrong project to receive a ridiculous amount of money through manipulation. So you counteract some of the manipulation by not allowing a project to vote only for themselves. In the final round right now, for example, you have to vote for three projects; you can't just vote for yourself. That's really important, because if there is a project that's running away with the vote, they still have to allocate two of their three votes — 66, 67 percent of their vote has to go to other projects. But that necessarily means the distribution of votes is going to be linear, because you don't have a lot of influence to express a really strong opinion one way or another. Once you have a panel of judges that ensures the finalist list is always high quality, plus quadratic voting, individuals can point very strongly in the direction they want. But also, if you vote exclusively on one project, you only get

[31:00] 10 votes, whereas if you spread it out, you have up to 100 votes.
You can really spread your influence out over a large range of projects if you want to. So the ability to manipulate is reduced, even without the other features we have built in. SMS verification is already an improvement over Keybase, and once you add it on top, hopefully — again, the theory is — we'll have a much more dynamic voting mechanism that better reflects the actual entries in a particular round, rather than always being linear. Yeah, for sure. And even if it ends up being a problem where, like, first place is always way outperforming, or something like that, you can always tweak quadratic voting. For example, you can reduce the credits from 100 to 99 and all of a sudden, in order to use all of them,

[32:00] you're required to distribute your votes across, I think, three projects, or something like that. So you can make small tweaks like that, and they can have big effects. If there comes a time where it feels like we need to make changes like that, we can easily do so. One of the questions that came up was: will we be able to engage judges ahead of time to address any concerns they may have regarding technical feasibility or other issues? Yeah, fantastic question. Absolutely you will. The panel of judges is probably going to be about 10 individuals per round, and there'll be a lot of discussion back and forth on the projects the panel feels are potential finalists. We're still going to have to figure out exactly how the judging works. If you only have to go through 20 or 30 projects, that's relatively easy to do, and you can give a bit more hands-on attention to every single entry; whereas if you have 500 —

[33:00] which I think our last SBC had, something like 500 entries — not everybody's going to get reached out to. There'll be a little bit of back and forth, but you need to make sure your entry is really strong and solid, so that there doesn't need to be a whole lot of back and forth, because what you've put out there is really good. You need to put your best foot forward and really make an impression, and not assume that if you put something out there that's half-hearted it still has a chance because someone will reach out to you — that may or may not be the case. So I would always encourage entrants to put in the maximum effort they feel is worth it for them, and then, as the panelists go through the entries, hopefully someone adopts your project and starts reaching out and provides some of that relationship and feedback. And if not, the community is always available — there are a ton of people out there who are never going to be on a panel but can still provide feedback on your project and help you improve it, so that when it comes time for the next Lab Fund or Seed Fund, your project is even stronger.
[34:00] But it will be important to sell yourself in this competition, particularly as we continue to grow and have more entries — to not assume too much of our panelists, but to make it as easy as possible for them to recognize that your project is amazing and deserves a longer look, in the same way you would pitch any other fund, any other funding opportunity. Really make that strong elevator pitch, if you will. Yeah, and somebody asks if the quadratic voting demo is on GitHub, or if it'll be on GitHub later. It might be; I can make sure we put that up, for sure. It would be good for other people to look through it. You can obviously play around with it at fqv.vercel.app, but I'll make sure the code is available for review and tweaking. Sweet. And then somebody said: if a project is very ambitious,

[35:00] its product has to be way more advanced than others, because people can sell the idea that they need way more funds when this is not true. Yeah, of course — that's kind of the nature of a competition, right? You have to sell people on your vision and what you're building anyway. And if somebody does a better job of selling the fact that they might need more funding, and the community supports it and wants to get behind it, more power to those people — they've done a good job of pitching the community. It can also be the case that a project very obviously needs way more funding but does a much worse job of pitching their idea to the community, and they won't receive as much. That's an inherent feature of competitions, rather than some problem you can easily solve outside of having one entity pick where the money should go. Yeah, I think it's worth speaking to that a little bit, particularly as somebody who competed in both the SBC and the SCF myself. It isn't easy to win these competitions.

[36:00] But if you treat it seriously and treat it as the fund that it is, and really put your heart and soul not just into the code and your idea — not just attaching yourself personally to your project, but saying this is a community effort, I want to make something that's valuable to other people. If that's a priority of yours, to make something that other people care about, that necessarily means you're going to promote it, you're going to talk about it, because you want people to use it and have it improve their lives. If you're just making something that's fun and you don't really care whether people interact with it or benefit from it — again, that might be a really good Lab Fund entry, but you're still going to need to get people's eyeballs on it and some buzz around it. And that can be a little challenging, I think, as developers: thinking about "how do I promote this thing? I just want to code it." I understand that challenge. But it also gives you the opportunity to get a leg up on other developers or entrants who aren't willing to do that, and

[37:00] we want projects to win not just because they did a good job, but because other people are going to use what they're building and see the value and get it.
If you build something amazing but nobody knows about it, that's kind of sad, that's unfortunate. I'd like to see amazing projects get visibility, and I want to help with that, along with you putting in that effort yourself. It's both of us: the fund itself putting face time on the projects that are highest quality and putting in the most effort, and you really selling it and saying "I've built something and it's amazing, and here's why" — doing blog posts, putting a little care into the design, the actual UI and UX, thinking through what it would be like to judge this project and how to make that easier. Making sure there's a good demo username and password available if your project has a login barrier — simple things like that. A video explainer showcasing your team, putting your face on camera — those things can really go a long way.

[38:00] And it's not like the SCF is the only fund like that. Anywhere you try to get funding, anywhere it's a competition, that's going to be true: those who put the most effort into communicating and selling their project are going to do the best, particularly if they've also spent a lot of time actually making their project good. You don't want to promote something that really isn't all that great, but if you have both of those things together — maybe skip a round of the Lab Fund and really dive into having both pieces, a really good project and really good blog posts — combine them to give yourself the best chance at succeeding in the Lab or the Seed Fund. Yeah, and it is worth noting, I think, that going forward, in different rounds of SCF, we're going to try to do a really good job of highlighting what made projects successful in any given round. That way everybody can learn from each other, and there's not this black hole of trying to figure out,

[39:00] you know, "what do I need to do to perform well in SCF?" I think we're going to do a much better job of getting that information out there so everybody can learn from each other, and it makes the competition better for everybody. Yeah, absolutely. Some blog posts on how to place well in the SCF, what judges are looking for — again, adding the relationship piece and putting a little more focus on that. Treating the SCF like it's a real thing, rather than just something that's going on somewhere out there — "you know, good luck" — as it's kind of been a little bit in the past. Focusing more on this is really important to the ecosystem: building up the resources and the marketing and the communication and relationship aspects of the Stellar Community Fund, so that it provides a really strong opportunity to get funding and recognition for the work you're doing, and really incentivizes you to want to build on Stellar, and gives you the right route to succeed,

[40:00] whether that's through funding or relationship building — going from the Lab Fund to the Seed Fund, from something fun to a business. There are a lot of different tracks individuals can take, but we're really highlighting those, helping people along, and including the community in that.
So it's not just me and Kolten trying to do everything; the community knows where they can get involved and where the gaps are that need to be filled, so that this thing can take on a much stronger community sense — finding where that's needed and where it can best fit as the fund continues to mature, as it has been. Sweet. I think we're caught up on questions, so we can move on in the presentation. All right, super duper, not too much further to go. Appreciate everyone for the fantastic questions. These are obviously two very important components that really highlight the difference between what we have right now and what we're launching today. So, in conclusion, two key takeaways:

[41:00] The SDF funds projects. So if you've got a contact — you know, clients, or yourself — who's looking, and maybe isn't a good fit for the infrastructure grants or the Enterprise Fund, which we also have but which are strictly SDF-managed, refer them to the SCF. Point people towards us; get yourself involved, whether as a judge or as a participant. With the split between the Lab and Seed Funds, we have a strong funding avenue for more mature and serious entrepreneurs while still providing a profitable playground for new and curious developers. So we've really got a platform that's going to support a much wider ecosystem in a much more effective way. We fund projects, but you can also help — and I cannot emphasize this enough. We get that it's a fund, but let's really take some time thinking about the community aspect of this. To get involved, join the Stellar Community Fund

[42:00] Keybase group. I would highly encourage that if this is something that's interesting to you — whether the design of the fund itself, participating as a voter, or you've got questions about entries, how to enter, or anything related — this is where the community is going to talk about the Stellar Community Fund. And then also make sure you participate in community discussion, whether that's on Keybase or in other discussion groups, as well as in the voting phases. So when voting comes out, participate, get involved, spend those credits, put them where you think they should go. And also help share the story of solid entries, as well as referring potential participants. If you have social media accounts and you want to get the word out about projects you care about — I mean, if you've put credits toward them, share their story, get them out there, so that they don't just receive funding from the SDF but actually

[43:00] get involvement from the greater community. Maybe they come back and participate in another Lab Fund with new improvements, and you're starting to invest and put energy towards people, not just projects, and really want to see individual developers and contributors and creators succeed, whether in a particular project or across multiple entries. Starting to invest not just in projects but in people is something I'd really like to see coming out of this update. So that's the close. There's no context behind that picture — you get no context; it's an inside joke now. Yeah, thanks for watching. No — before we wrap it up, we do need to share the announcement blog post, and we need to show the site. That's correct. You're ahead of yourself, sir.
I'm going to share the announcement blog post right now in the chat, so people can head over and read

[44:00] it. It's basically a nice shareable summary of everything we just said. Fantastic. Will you be able to pull up the site? Nice. Ta-da. Amazing. So this is the all-new — and how convenient, somebody just asked what the dates are for the first round, so we can go over that. Fantastic question. The new site is over at `communityfund.stellar.org`. A lot of work has gone into the site over the past — I don't even know how long it's been. Anka, she's a Webflow wizard now, designed and built this whole site basically on her own. Unbelievable, incredible. Huge props to Anka for this incredible site. But yes, the Stellar Community Fund has a marketing page, and it's not just "here are the rules" — it actually outlines what it is and the two different funds we've got. We've got art, we've got a marketing video — we're big leagues, Kolten. Yeah, I know,

[45:00] I know. But anyway, the website has a breakdown of both funds. It has a couple of step-by-steps, it's got some featured projects, and it's got some cool illustrations of little astronauts, which are amazing. Each tab on there — the Seed Fund tab and the Lab Fund tab — I believe has not only the prize pools but the dates for when they start and a countdown timer. Which one are we on right now? This is the Lab Fund, okay. So the Lab Fund — actually, what is today's date? August 28th — technically the Lab and the Seed Fund are open for submissions right now. I don't know if I would submit today, unless you've just been sitting on an entry, but they're officially both open, so you are welcome to begin submitting projects to these brand new Lab and Seed Funds. The Lab Fund will obviously close first; it's the shorter fund. It opens up today, and submissions will close

[46:00] on September 21st. According to our countdown timer, that gives you about 23 days to submit to our very first Lab Fund. The panel discussion will begin on September 21st and go through October 12th. Then on October 26th, after the community discussion — where the community will go through the entries, ask any follow-up questions, and have an opportunity to engage with the entries — the community voting will begin. That'll go on for a week, as normal, on that new flaggable quadratic voting, and it all happens right here. So rather than it being GalacticTalk plus a separate voting page, it's all integrated straight into communityfund.stellar.org, right here. You can go and log in. If you are an entrant, you log in with your phone number, and that allows you to enter your project. If you're a panel judge — there aren't too many of you, but panel judges

[47:00] will log in with their Keybase account; that's how we differentiate between entrants and panelists — then you can go through the projects, either to enter or to begin the selection. So this is all very obviously brand new, experimental in a sense, but we're very excited. We put a lot of time and effort into this. As you have questions or feedback, you can reach out in that Stellar Community Fund Keybase channel, or go to the connect page, where there's a form at the bottom where you can reach out with issues, bug reports, or other questions.
If you've got partnership opportunities, anything like that, you can reach out through that form. And just to cover the Seed Fund timing really quickly — you can check it out yourself, but submissions are open now, and those will actually go all the way through January 18, 2021. This is because Seed Fund projects are big projects. These are not quick "hey, I've got an idea this weekend" projects. These are big projects; we want to see businesses coming out of this that will

[48:00] last. So we're making lots of time available for submitting your idea to the Seed Fund. Panels will begin selecting those on January 18th, and that will go on through March 15th, when the community discussion will begin. That'll go through April 26th, and then finally, on May 10th, the community voting will end — the community voting for this one is actually two weeks — and on May 10th the first Seed Fund will close out. If you have other questions about the rest of the dates, those are also in another community post that we're putting out, talking a little bit more about the technical details behind the Stellar Community Fund. Yeah, and if you go through some of the submission requirements and it's still not clear, please reach out to Tyler or me or anybody else to discuss where your project might fit, whether in the Seed Fund or the Lab Fund,

[49:00] and we can definitely have that discussion — and then, you know, see which parts of those requirements and descriptions were confusing, because it's extremely important that we clearly communicate the separation between the two. Yep. So we've done quite a bit of writing, both on those blog posts explaining the new SCF and on a couple of how-to articles, as well as the guidelines, the rules, the phases in the rounds, and a look at previous winners. As you go through that content, if there are things you feel we could do better — again, this is a community effort, not just in the fund itself but in the actual design of the community fund — we would love feedback and improvement ideas, or ideas for articles you would find helpful. You can always submit those through that form, or just make suggestions in the Keybase group. Sweet. And I think we've been going for a long time, so we can wrap this thing up. Yeah, I mean, I'm super excited. If you can't tell, we've spent so much time on this, and as someone who used to participate,

[50:00] this is the fund I would have wanted. So I'm so happy and excited to have this live. I can't wait to see what people build, and to watch how it continues to mature and iterate. I would love to see the community get more wrapped up in the community fund, engaged, with a bit of ownership around what it is and how it operates. So I'm excited to be at a spot where we can finally launch this new thing and be starting from a strong place to continue our community fund efforts, aligning with the mandate we have for this particular allocation of lumens. Sweet, thanks everybody. Yep, thanks, guys. If you have questions, again, reach out. Otherwise, it's been a lot of fun explaining it and walking through it, and I can't wait to see what you build. Hopefully it won't be too long before we have another one of these live hangout shows — maybe even next week. Stellar podcast,

[51:00] live next week. There's the plug.
diff --git a/meetings/2020-09-11.mdx b/meetings/2020-09-11.mdx new file mode 100644 index 0000000000..63bd844bb3 --- /dev/null +++ b/meetings/2020-09-11.mdx @@ -0,0 +1,82 @@ +--- +title: "Data Analytics on Stellar" +description: "Debnil Sur walks through an SDF analytics stack for exploring the network at scale, covering Horizon/Core data limitations, a BigQuery-based pipeline, and practical SQL patterns for market and account analysis." +authors: [debnil-sur] +tags: [tutorial] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Do you want to analyze historical trends on the network in seconds? In this talk, software engineer Debnil Sur explains how SDF designed a large-scale analytics pipeline using off-the-shelf tools, and shares examples of the kinds of questions you can answer with basic SQL. + +The session starts by outlining why Horizon and Core databases (optimized for transactional workloads and API access) are a poor fit for heavy historical analytics at scale, then walks through the design goals for a warehouse-backed approach: organization-wide access, daily refreshes, easy integration with existing infrastructure, and approachable metrics/visualization for non-technical stakeholders. + +Debnil then covers an initial implementation that exports data from Postgres, stages files in cloud storage, and loads tables into a warehouse to enable much faster queries. The talk also surveys visualization options and why Metabase was selected for an early, internal drag-and-drop + SQL experience. It closes with v1 limitations and a roadmap toward higher update frequency, better observability, and more public-facing analytics. + +### Key Topics + +- Why Horizon/Core schemas are great for transactions but slow for large historical analytics. +- Design goals for an internal + community-friendly analytics system. +- A first-pass warehouse pipeline: exports, staging, loading, and fast SQL queries. +- Metrics workflows and why spreadsheets and Python fit the reporting layer well. +- Visualization tradeoffs and why Metabase works for an initial deployment. +- Roadmap topics: real-time-ish updates, task orchestration, observability, and public dashboards. + +### Resources + +- [Voyager](https://stellar.expert/explorer) +- [Horizon API](/docs/data/apis/horizon/) +- [Stellar Core](https://github.com/stellar/stellar-core) + +
+ Video Transcript + +[00:00] Hi everyone. Hope everybody is having a fantastic Friday morning, afternoon, wherever you are. My name is Debnil. I'm a software engineer at SDF on the Voyager team. + +[01:00] We focus on exploring the Stellar ecosystem through data analytics and liquidity. Today I'll be talking about data analytics on Stellar. So the goal of our team, as well as a lot of the work that I do at SDF, is to do the fundamental meta-work that powers the rest of the network. How can we ensure that assets have robust markets? How can we improve the visibility of the network, both inside and outside SDF? And how do we use data to further drive network growth? In this talk I'll be covering the first version of our data analytics system, targeted at answering those three questions as well as many others. So, who is this helpful for? Anyone interested in: one, building analytics pipelines from zero to one — I hope our experience is generally helpful to people who have a bunch of data and are just trying to build out some analytics system. Two, exploring the Stellar network — I'll present some useful access patterns, tips for navigating the Stellar data set, + +[02:00] and a few illustrative examples. With basic SQL knowledge, you can ask some powerful network-wide questions. Three, scaling analytics from one to one hundred — if you have a working data pipeline, what's next? I'll share some of my thoughts on the subject, and I'd love to hear your wants. As a brief roadmap of the talk, it'll be in four parts. First, design: what we wanted to build. Second, implementation: how we built it. Design and implementation are the meat of this talk, as it is an engineering talk after all. Third, analysis: I'll display some interesting queries along with some strategic tips on using our public data set. In the interest of time and network connections over the talk, I'm not going to live-code, but I will share links for folks to do their own exploring. And finally, next steps: what's in the pipeline — no pun intended — for data analytics. Note that if you have questions, there's a QR code on the YouTube, and you can also ask questions in the YouTube chat, + +[03:00] and I'll be answering those questions at the end, to make sure I get through all this material. So, first, design. To understand the design goals, let's first talk about our motivations. Why did we even want a cloud-scale data pipeline? Ultimately, we want to quickly and efficiently generate the histories of assets, accounts, markets, whatever, over the history of the network. The primary use case is internal business analytics: we wanted to make it as easy as possible for our business development and ecosystem teams to understand the effects of their efforts. So Stellar uses the widely used database system Postgres, which has been an industry standard for 20, 25 years. Stellar Core writes data to Postgres tables, and then Horizon — our API into Stellar Core — uses the Core data, but it also has its own database, because it's a web application. So the pros of this are that it's well known and well documented. It's great for transaction processing — handling a bunch of online transactions coming at you in a highly concurrent, heavy-write + +[04:00] environment. The cons are also pretty well documented: it's slow for historical queries on specific column values, which makes it much worse for analytics. Horizon's database structure and indices are optimized for Horizon's exposed API.
That also means that if you're trying to do deeper queries across a bunch of different accounts or tables, it quickly becomes inefficient. So, for example, complex joins or filters become extremely time-inefficient absent indexes. A good example of this would be trying to do analysis of the network while excluding arbitrage operations, because that would require a bunch of joins of different tables, and then you have to apply filters over a really big table. So, to understand behavior at scale, it became pretty clear to us that we would have to build some new infrastructure. It's also important to talk about our goals. So, above all, we wanted value for internal stakeholders. This is a really important note: when you're building data products, it's really easy to nerd out and build something that's really cool for + +[05:00] engineers, but the end users are always your non-technical users. If product guarantees don't align with business needs, then you won't ship the right solution for your end users, and, even worse, you might just make more work for yourself, because you have to onboard a bunch of people into a non-intuitive system. Second, open source or open access. Open source is really core to SDF's DNA, and we want to support open-source tools wherever possible. Of course, some things require spend, like cloud infrastructure, but we want to optimize for open access in the organization, followed by easy public access. Third, a daily updated data set. While some businesses need real-time updates, at the point we were at when we started building this, daily updates were sufficient for the coarse-grained metrics that we wanted. Fourth, seamless integration with our current stack. As mentioned before, we rely heavily on Postgres for our databases, and when you have existing tools, you need to make sure that support for those tools is well tested in anything you bolt onto your system. + +[06:00] Additionally, it had to be deployed easily. It had to be in-house, controlled on our infrastructure, and easy to deploy by our site reliability and ops team. Our stack last year migrated from Puppet to Docker and Kubernetes, so we needed technology that easily integrated with this. For some quick buzzword explanation: Puppet is legacy system administration software, Docker lets you contain custom programs in their environment, and Kubernetes is orchestration software that lets you decide how those containers are then deployed and run. And fifth and finally, easy metrics and visualization. This is pretty self-explanatory: it lowers the bar for non-technical users, and it brings much more value to the organization as a whole. So, many of the requirements above were fuzzy; some are more technical and tools-oriented. How do those break down into technical decisions? This is ordered from most to least defined. First, as deployment infrastructure, we would use Docker and Kubernetes. This would be important for orchestrating software, having regularly + +[07:00] scheduled cron jobs, and the like. It was already our technical stack, so we needed to work with it. Second, the cloud-scale warehouse. This needed a bunch of capabilities determined by what I just said: it had to have scripting abilities, really good Postgres integration, and really easy organization-wide credentialing, so all engineers and non-engineers could easily access the warehouse and make their own queries. Third, metrics and visualization. This was the least defined, for sure.
We decided that it would be secondary to the data warehouse, because, ultimately, the engine matters more than whatever you're doing on top of it. But we did know that we wanted this to be open source, free, and easy for non-technical users through something like a drag-and-drop interface. Let's now talk about implementation. So the first piece was infrastructure: Docker and Kubernetes. While this decision was made for us, some details of it made everything a lot easier, which is what commonly happens when you have to integrate with an existing stack. + +[08:00] Docker-based containerization gave us a really easy standard for evaluating other tools, particularly visualization: could it be easily deployed on our infrastructure? Second, Kubernetes cron jobs gave us an easy scheduling method for running scripts deployed via Docker — we could just set a schedule and run the procedure as-is. It was also really easy to integrate the Postgres cluster, it was easy to provision external storage in case we needed extra disk for the data pipeline, and it also allowed for retries on failure. In all, this is a really good deployment stack, and it's pretty clear why it's become the modern stack of choice: it's really good for integrating a lot of different technology, and it's really resilient. In general, it's pretty easy to work with — no real complaints about Docker and Kubernetes. Second, for the cloud data warehouse, I was kind of worried about this at first, because there are a ton of options, but Google BigQuery actually ended up being a really easy choice for us. For one, the queries are super fast — they were 10 to even a thousand times faster than some queries against the Horizon + +[09:00] database. We were honestly just blown away by how much better it was. Second, intermediate cloud storage: where do files live during the data pipeline? Google Cloud has a cloud storage service, which is called Google Cloud Storage. This means that we can separate the pipeline into a few different parts: one, export tables from Postgres to disk; two, upload from the disk to Cloud Storage; and three, load from Cloud Storage into BigQuery. Separating the pipeline like that reduced the risk of failure, and it also made retries less expensive, because we would just retry one of the scripts. Third, there was really painless command-line scripting. It's super straightforward to script exporting files from Postgres and then uploading to BigQuery. It made creating a basic pipeline really easy to reason about from the command line and also, in turn, pretty easy to containerize, because we were just putting a bash script into the Docker container. Fourth, G Suite authentication. Within SDF we use G Suite, so everyone already had G Suite permissions. It also meant it was really easy to set + +[10:00] up and share permissions across the organization. I would say this is a low priority in terms of choosing a warehouse, but it is actually why we tried BigQuery first — because it was really easy to integrate within our organization — and I suspect that organizations that run on Microsoft, like Microsoft Teams or whatever, might have a similar experience with Azure. So I do think it probably depends on organizational needs.
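To make the shape of that three-stage flow concrete, here is a minimal sketch under stated assumptions — the table, bucket, and dataset names are illustrative placeholders, not SDF's actual configuration, and the real pipeline was a containerized bash script rather than this Python equivalent:

```python
# Sketch of the three-stage load: Postgres -> local disk -> GCS -> BigQuery.
# Table, bucket, and dataset names are illustrative, not SDF's actual config.
import subprocess
from google.cloud import bigquery, storage

TABLE = "history_trades"            # hypothetical Horizon table
BUCKET = "example-stellar-exports"  # hypothetical staging bucket
DATASET = "example_dataset"         # hypothetical BigQuery dataset

# 1. Export the table from Postgres to disk as CSV.
subprocess.run(
    ["psql", "horizon", "-c",
     f"\\copy {TABLE} TO '/tmp/{TABLE}.csv' WITH CSV HEADER"],
    check=True,
)

# 2. Upload the file from disk to Cloud Storage.
blob = storage.Client().bucket(BUCKET).blob(f"{TABLE}.csv")
blob.upload_from_filename(f"/tmp/{TABLE}.csv")

# 3. Load the staged file from Cloud Storage into BigQuery.
client = bigquery.Client()
job = client.load_table_from_uri(
    f"gs://{BUCKET}/{TABLE}.csv",
    f"{client.project}.{DATASET}.{TABLE}",
    job_config=bigquery.LoadJobConfig(
        source_format=bigquery.SourceFormat.CSV,
        skip_leading_rows=1,
        autodetect=True,
    ),
)
job.result()  # each stage can be retried independently on failure
```

Splitting the work into three independent steps is what makes the cheap retries he mentions possible: a failure in the load step doesn't force a re-export from Postgres.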
Third, for metrics, we decided to use Google Sheets. This was actually more straightforward than the warehouse, because we decided that we wanted some basic data science capabilities that were slightly outside the scope of queries, and after we talked to a bunch of folks around the organization, it made the most sense to surface this through spreadsheets. It's a really common user interface for non-technical folks, and it keeps data tabular, so you can pretty easily connect "here's the table I got out" with "here's how it looks on the actual spreadsheet." Python scripts are also the right tool for the post-processing, by a lot — Python really is a Swiss Army knife for data. You can really easily read a table + +[11:00] from BigQuery, run some custom post-processing using common data science tools like pandas, and then write the output to a Google Sheet. Finally, note that we deployed these scripts in a serverless fashion on Google Cloud Scheduler: once the BigQuery dataset was updated by the Kubernetes-orchestrated cron job, you could then trigger these scripts and update the spreadsheet. It was a really cool event-driven architecture — really simple, really powerful — and, honestly, I think this sort of way to automate metrics is a really good one. Fourth and finally, visualization. This was by far the hardest part of the implementation, since there are actually a million tools, and documentation that compares them directly doesn't really exist. At this point we'd settled on BigQuery, so we wanted really easy BigQuery integration, really easy deployment on Docker and Kubernetes, some nice visualizations that use drag-and-drop and some SQL, and we also wanted it to be open source. So I tried a lot of tools that fit some or all of those capabilities. + +[12:00] Some of the most prominent ones that we looked at were Google Data Studio, Apache Superset, and Looker. What we generally found was that the tools that played nice with BigQuery usually either didn't have drag-and-drop or weren't free, and so we ended up settling on Metabase, because it honestly fit all of our original pillars quite well. It has both SQL and a drag-and-drop interface, it's free, and it's really easy to set up; I think if you need to MVP a pipeline, it's a really good option. Finally, let's talk about some of the downsides of our v1 implementation. For one, daily frequency is slow. While good enough for initial use cases, a daily update limits building out data-intensive apps on Stellar. We want to get closer to real-time updates for the next version of the system. Two, observing failures is hard. We were still learning our way around Kubernetes logging, and ideally we want to be able to retry really specific, granular portions when they fail. There are good task management systems out there, and that seemed like a + +[13:00] natural evolution of the system. Three, visualizations were private. We'd love to expose our information visualizations publicly, but we can't mix the platform that we use for internal business analytics with everyone else, so we decided that we'd have to think about an intermediate solution. And four, exploring data is painful. You'll notice I didn't actually talk about a data science platform above, and while serverless functions provide some capabilities for robust and regular jobs, ideally the platform enables exploratory data analysis through scripts. Every data scientist now uses Jupyter notebooks, and we want to make it really easy to do the same on Stellar data.
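Before moving on to the queries, here's roughly what the BigQuery-to-pandas-to-Sheets metrics flow he described might look like. The query, spreadsheet name, and use of the gspread library are illustrative assumptions, not the actual reporting scripts:

```python
# Sketch: read from BigQuery, post-process with pandas, write to a Google Sheet.
# The query, sheet name, and gspread usage are illustrative assumptions.
import gspread
from google.cloud import bigquery

sql = """
SELECT date, payment_count
FROM `example_dataset.daily_payments`  -- hypothetical table
ORDER BY date
"""
df = bigquery.Client().query(sql).to_dataframe()

# Custom post-processing, e.g. a 7-day rolling average of payment counts.
df["rolling_avg"] = df["payment_count"].rolling(7).mean()

# Write the result to a spreadsheet for non-technical stakeholders.
gc = gspread.service_account()  # credentials from a local service-account file
worksheet = gc.open("Network Metrics").sheet1
rows = [df.columns.tolist()] + df.astype(str).values.tolist()
worksheet.update("A1", rows)
```

Triggering a script like this from Cloud Scheduler once the nightly load finishes gives the event-driven refresh he describes, with the spreadsheet as the stable interface for non-technical users.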
So now let's look at some basic analysis — some queries on our system. For one, we'll start off with an easy query: what account has the highest lumen balance? I call this easy because it's pretty short, but it's really cool because it illustrates how powerful even basic SQL tools are. It shows some really basic SQL syntax: + +[14:00] SELECT, FROM, ORDER BY, and LIMIT. Those give you the tools to ask basic ordinal questions about Stellar history. SELECT ... FROM chooses specific fields from a table, ORDER BY orders the results by a field, and LIMIT says how many you want to see. As you can all see, the account with the most lumens is the "galaxy void" account, which received the lumen burn at last Meridian. Now, a medium-difficulty query: how many payments of an asset are made daily? We show some new syntax here. DATE converts a timestamp to just its day. SUM is an example aggregation function, which takes a bunch of results and combines them into one specific number. And WHERE can be used for various conditions — a WHERE clause is shown here. Finally, GROUP BY will group by a specific field. So this lets us pretty quickly organize and group results by day, compute daily amounts, and do some pretty good aggregations. + +[15:00] All in all, it's actually really simple to do this sort of day-by-day analysis, and it's a really core part of some of the organizational metrics we track. So, finally, a high-difficulty query: how many trades of a trading pair are made per day? You'll notice that this has a lot of lines, but the actual primitives are pretty similar to things you just saw. It has some new syntax: we use WITH ... AS to create some temporary in-memory tables to query, and JOIN lets us join together different tables on common fields, so you can have some really big tables that you can now condition, filter, group, and so on and so forth. Once again, I want to say that the queries are not the focus of this particular talk — the engineering is much more of one — but feel free to reach out to me, either in the YouTube chat or after; I'm happy to provide more instruction on how anyone can make these queries. We also put out a blog post earlier this summer that covers a lot of this, and I highly recommend checking it out.
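As a concrete illustration of the medium-difficulty pattern, a daily-payments query run through the BigQuery client might look like the sketch below. The dataset, table, and column names are illustrative stand-ins, not the actual schema from the talk:

```python
# Sketch of the DATE / aggregation / WHERE / GROUP BY pattern described above.
# Dataset, table, and column names are illustrative, not the real schema.
from google.cloud import bigquery

sql = """
SELECT
  DATE(closed_at) AS day,        -- collapse timestamps to calendar days
  COUNT(*) AS payment_count,     -- aggregate rows within each group
  SUM(amount) AS total_amount
FROM `example_dataset.payments`
WHERE asset_code = 'XLM'         -- filter to a single asset
GROUP BY day
ORDER BY day
"""

for row in bigquery.Client().query(sql).result():
    print(row.day, row.payment_count, row.total_amount)
```

The high-difficulty trades query he mentions layers the same primitives, just with WITH ... AS subqueries and a JOIN across tables before the final GROUP BY.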
+ +[16:00] And finally, let's talk about some next steps. To motivate these, think back to the downsides of the implementation, and we'll talk about some of the things we could do. So, for one, going from slow to real-time frequency. New Horizon capabilities, like the new ingestion engine, help us get closer to real time, because that enables much faster extraction of data from Stellar Core and the history archives. Our awesome intern, Isaiah Turner, has been building a command-line tool that uses these capabilities to read in data in close to real time and then output it in the expected schema, and the link to it is right here in the slides. Second, going from low to high observability. Fixing failures is pretty hard, but task management makes observing and debugging a lot easier. While our current system wasn't unmanageable, the number of moving parts made reading Kubernetes logs the primary solution. It's important to note that we added some Sentry logging around business metrics, but the thing about all of that is that it shows you that a problem happened; + +[17:00] it doesn't really tell you where it happened or what to do to fix it. So, combined with the above tooling, Isaiah has been working on an Airflow task management system for the pipeline. Airflow is a task management system that Airbnb open-sourced a few years ago, and, honestly, in our experience it's been great at orchestrating a bunch of different smaller scripts — being able to say "hey, this failed," and giving a really nice UI for engineers to go and just retry different parts. This has the same overall flow as before: it reads ledgers from Stellar Core, it writes structured documents to Google Cloud, and then it applies those as changes to the BigQuery tables. This one is almost done — it should be done next week — and the link to that, which is already open source, is on the slides as well. Third, going from private to, soon, public visualization. We're thinking about displaying markets and corridors on a public-facing site like `stellar.org`. It'll make it easier for prospective anchors to see what the volume looks like in and out of specific businesses and countries. We're just reasoning about the right + +[18:00] strategy in a way that meets both internal and external needs. So, once the new data system is stable, we're going to see how you can leverage BigQuery data warehousing to build a scalable web application on top of it. It's its own engineering challenge, because it's pretty hard to do that in a time-efficient way, but it's a really exciting one, and I'm excited to get cracking on it. Fourth and finally, data exploration is painful, so a data science platform is very much TBD. If anybody in the community wants to take this challenge on, I'm more than happy to see a community implementation, and feel free to reach out to me with either ideas or a desire to try to figure out how to do it — I'm very happy to talk about it. So that's all for me. I hope you enjoyed hearing about this pipeline and all the stuff we've been doing, and I'm happy to answer any questions. So, one question that's been asked: are there any cool projects using this data set that you wish existed? I'll start by plugging again the two + +[19:00] things that I just said, because I think they enable a lot of the cool projects. For one, I think it should be really easy to see what all the major anchors on Stellar are doing, and it should be really easy to see what the volume in and out of specific corridors looks like, and it should be even easier to see what the rates look like. I think the rates are really hard to do on this data set, but really powerful. So, for example, one of the reasons that the EURT-to-naira corridor has been killing it lately is because the rates for it are so much better than what you would get elsewhere. When you have really good rates on Stellar, it makes the whole network work, and I think applications that surface information like that — like the key killer applications of Stellar — are the most useful ways to leverage this data set in the short term. Longer term, things that I think would be cool that leveraged this data set: I think it could be really cool to see how assets on the DEX itself can function as a hedge + +[20:00] against inflation, through price histories over time. This is one of the things that people talk about as a goal for crypto, and one of the things that, with Vibrant, SDF has started working on, and I think that if there were projects that demonstrated that longitudinal history, it would be another really cool value for Stellar and the network. So, another question: what can the community do to help expand Stellar and improve it?
For those who do not understand the engineering aspect of that — so that's a good and interesting question. I will say that you don't need to understand the engineering aspect of what I just talked about to use the data set. That's one thing that we really tried to make sure of: that you can just use some basic SQL queries and show some really powerful data. What can the community do to expand on it? Lots of things, but within the scope of this talk, I would say it looks like telling people, "hey, Stellar is really cool," but then also showing them what the volume on Stellar looks like. This is one thing that I've thought a + +[21:00] lot about over the course of the summer, when we've had a lot of volume on Ethereum with DeFi. Right — how do you show similar volume with Stellar? Because Stellar is one of the few layer 1s that can actually do layer 2 things like that. So I think demonstrating that value like that exists, and showing people that Stellar isn't just a payments platform with XLM — there's all this other stuff you can do on top of it — I think that's one of the things that becomes a lot easier with really publicly queryable network history. It would be really cool for community members to promote it in a data-driven fashion. Awesome. So it looks like there are no more questions. Last call, if anyone has anything else. But feel free to reach out to me, either over Keybase — my Keybase username is debnil — or over Twitter. My Twitter username is debnilsur: + +[22:00] D-E-B-N-I-L-S-U-R. I'm happy to talk about all things Stellar-related, as well as ways to leverage this data set in the community.
diff --git a/meetings/2020-10-09.mdx b/meetings/2020-10-09.mdx new file mode 100644 index 0000000000..7c0d73df5a --- /dev/null +++ b/meetings/2020-10-09.mdx @@ -0,0 +1,87 @@ +--- +title: "Stellar Quest: Learn Stellar, Win Prizes!" +description: "Tyler van der Hoeven introduces Stellar Quest, a gamified set of testnet code challenges that awards XLM and collectible badges, and walks through the app’s architecture, anti-sybil login, and prize-claim flow using Albedo and fee-bump transactions." +authors: [kalepail] +tags: [tutorial] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Join this Engineering Talk to learn about Stellar Quest, a new way to onboard developers through a structured series of practical challenges. SDF’s Ecosystem Evangelist, Tyler van der Hoeven, explains the goal—bridging “normie devs” into the network with a low-friction learning path—and demos how the experience works end-to-end. + +Stellar Quest is organized into timed challenge drops (released on a schedule), completed on testnet so participants don’t need to buy XLM. Each challenge unlocks a collectible badge, and early finishers can win XLM prizes while supplies last. Tyler also explains how the team balances “enough hints to keep people moving” without giving away the full solution. + +On the engineering side, the backend is built with Cloudflare Workers and KV storage, while the frontend uses StencilJS hosted on Vercel. For authentication and anti-sybil protection, Stellar Quest uses SMS-based login (with support paths for regions where verification is tricky). Prize claiming is handled through Albedo: users sign transactions in a non-custodial flow, while the app uses fee-bump transactions so users don’t need to pay fees when claiming prizes. + +### Key Topics + +- How Stellar Quest onboards developers with gamified, modular challenges on testnet. +- Challenge cadence: scheduled releases, badges for completion, and limited XLM prize pools. +- Architecture overview: Cloudflare Workers + KV, StencilJS frontend, Vercel hosting. +- SMS login as an anti-sybil mechanism (and how exceptions/support are handled). +- Prize claiming UX: Albedo signing, non-custodial design, and fee-bump transactions to sponsor fees. +- Why prizes may be queued (KV latency), and how more real-time primitives could improve it. + +### Resources + +- [Stellar Quest](https://quest.stellar.org) +- [Stellar Community Fund](https://communityfund.stellar.org) +- [Albedo](https://albedo.link) +- [StellarExpert](https://stellar.expert) + +
+ Video Transcript + +[00:00] Hello everyone. My name is Tyler van der Hoeven, and I am very excited to be presenting another engineering talk. Today we're going to be going through + +[01:00] Stellar Quest. This is a new app that I'm very excited about. Before we get started, though, a couple of little housekeeping items. As you have questions while we're going through today's talk, you can either scan the QR code here on the screen to ask questions or just pop them into the YouTube chat. We'll try to get to questions, so as you have questions for me, be sure to pop them in there. We'll have time for a Q&A at the end, and then we're just going to kind of walk through this application, Stellar Quest. So it's been exactly a month, actually — right on the heels of the big release of SCF 2.0 — that I've been working on this long-standing project that I've wanted to build, kind of filling in the gap for what I would call normie devs: you know, just developers that may not be familiar with cryptocurrency or have been very wary of jumping in, or even a lot of friends that I've had from + +[02:00] past jobs — wanting to introduce them to cryptocurrency, but having a bit of a gap: like, where do I get them started? Where do I sort of plant them? I can't just send them documentation; I want to give them a nice landing pad for Stellar. So Stellar Quest sits in this gap here: between the normie devs — developers that are good and enjoy what they do, but are not familiar with cryptocurrency or Stellar, and have maybe wanted to be or would be interested, but need a good landing place, a good technical onboarding into Stellar, kind of away from all of the, you know, "wen moon" conversations and pricing and stuff, who just want the technology: like, help me understand from a technical perspective what's going on with these apps. So that is Stellar Quest. It's this stepping stone between things like the SCF — and being deeper into the technology — and somebody who really knows nothing about cryptocurrency or the technology that powers it. + +[03:00] So this is a very technically focused challenge. It's a gamified application that I've built. Right after SCF I started working on Stellar Quest, so this has been a solo project that I've been working on over the past month, and since this is an engineering talk, I think it's worth pointing out — because it's interesting, and it's super interesting to me — it's all built on Cloudflare Workers technology. I'm kind of an AWS Lambda guy — I love serverless technology — but I really wanted to try Cloudflare Workers. So all the backend is built on top of Cloudflare Workers, their serverless architecture, as well as their key-value pair storage, and then the frontend is StencilJS hosted on Vercel. For those of you who are interested in that sort of thing, I think it's fascinating. I really enjoyed working with those technologies — I love StencilJS, and Cloudflare Workers was a real joy to work with. Definitely looking forward to using that + +[04:00] moving forward. I don't have a fancy slide deck. I will call out this one slide real fast — just as you find these things interesting or want to keep up with projects that I'm working on, be sure to follow me, tyvdh, on Twitter. And again, I'm the Ecosystem Evangelist at the Stellar Development Foundation. We're going to actually be looking at the application itself.
I'm going to walk through the app that I've built and how to use it, so you'll get a little bit of a sneak peek. This is a challenge application, so for those of you who are lucky enough to tune in, you might get a little sneak peek at what we have coming with the upcoming first Stellar Quest challenge. So I'm going to go ahead and switch over. I am, unfortunately, going to have to call out one of our third-party services: RunKit, within the last 20 minutes actually, is experiencing a pretty major outage, so some of our images, as well as challenge checking, isn't working right now. So if you see some broken images or we + +[05:00] encounter some unexpected errors, it's probably related to the RunKit outage, which is unfortunate. Hopefully it won't affect us too much — unless, of course, it's Cloudflare that's having the outage, but I think it's just RunKit. So, Stellar Quest: this is our landing page at the moment, calling out a little bit about what it is. Some of this is up for discussion — definitely looking for feedback, and you guys are getting a bit of a sneak peek, a first look at what we have so far. So, as you have questions or comments — or, for the dedicated Stellar fans — be sure to reach out with any feedback or ideas for how to improve these screens or the commentary that we have. But you can see right away that Series 1 of Stellar Quest will be launching on Monday: Challenge 1 of Series 1 is going live at 9:00 AM this coming Monday, so mark your calendars. There are XLM prizes, as well as these fantastic fancy badges, which + +[06:00] you can't see right now because, again, we're using RunKit to render some of those. So until RunKit's back up, those will be dead images — unfortunately, for those of you, again, this is an engineering talk. So these are badges. We've got XLM prizes that are a while-supplies-last sort of thing — you know, the first few people that complete each challenge will be eligible for XLM prizes — and then anybody who completes a challenge will get one of these badges. There are eight badges and there are eight challenges: whenever you complete a challenge, you unlock one of these badges. So they're kind of like the collectibles that you'll have on your Stellar account as you complete challenges. These are not one right after the next: the first one launches on Monday, and then we wait until Friday at 9:00 PM — we've got AM and PM to make sure we cover everybody + +[07:00] across the globe. Eight challenges; the first one launches on Monday. So, very excited for this to go live. Be sure to mark your calendars. And then, as you have people that have been interested in Stellar — maybe you've been wanting to onboard your developer friends into what we're doing here — be sure to send them the link, which will be `quests.stellar.org`.
I'm not sure I include that in any slides, so I'll make sure to call that out. So let's go ahead and log in. We've got some legal stuff; we've got my super-secret dev login number. We're using SMS codes to protect against Sybil attacks, where one person can pretend to be many people. If we used social or email, it would be very easy to duplicate those and just continuously log yourself in, complete challenges, and thereby claim all of the prizes as a single entity. + +[08:00] So, by using SMS and only allowing actual mobile numbers — you know, no voice-over-IP numbers or landlines, just mobile numbers connected to a valid network — we block against a lot of that. So, if you're confused about why we're using SMS numbers: that's the anti-Sybil mechanism that we're using for now, the same thing that we're using with the community fund. Verify that code, and this will jump us to challenge number one. Now, I'm going to give you a sneak peek of challenge one — I'm going to try my best not to click on any other challenges, so that you don't get too far ahead of us. Challenge number one here is just creating an account. These challenges are all very modular and relatively simple. If you're brand new to Stellar, these might be like, "whoa, what in the world is this?", but we've got helpful links and brief commentary on what you're trying to do. Again, these are challenges for prizes, so we don't want to give away too much. The idea here is to give away enough information that people know where to go to complete the + +[09:00] challenge, but also not to lose them along the way. So we'll test and see where we can make improvements, to strike that good balance between giving away too much and not giving away enough to keep the incentive to continue on with the challenges. So, we've logged in; we've got our first challenge. All the challenges will be completed with the same testnet account. So this right here is just the account seed, or the account public key, and you need to fund this account. You can't directly fund it with Friendbot; you're going to have to figure out a way to do it — probably through Friendbot via another account, or something like that. Hint, hint. But these accounts will all be done using the testnet. Stellar Quest will be divided into different series. The first series is the one that we're launching now; it's comprised of eight different challenges, and these series will launch — who knows, you know, maybe we'll do some partnerships with other + +[10:00] dev groups, or just continue to launch these from the Foundation. I would love to see partnerships happen, where there are lots of different series that are co-branded with other entities. And for each one of those, you'll get a different public key that links up to the different challenges, and you complete them all through there. When we actually get to the prize-claiming portion, that's when you're going to connect a public key on the live network, where you can claim prizes and actually have all your stuff on the live network. But for the actual challenges, everything's on testnet, so you're not going to have to spend any money, or go through Coinbase, or somehow figure out how to get lumens. The idea, again, is just to understand the technology behind Stellar. I've got a nice Discord channel, if you guys want to join that once it's live; we'll have some different discussion going on there, and, if anybody has problems or issues, we'll try to build up a little community to support each other. There's another thing I think is worth calling out on the login. We're using + +[11:00] the Twilio Lookup service, and there are some issues with different countries or regions around SMS verification. So, if you're pretty sure that your number should work — particularly, like, if you're using a voice-over-IP number but that's your main number, or that's what you're using to get around issues that you have with your network — but it's not letting you in, you would jump into the Discord channel. There's a channel there for login help, and we can whitelist your number to let you in, even if you're using a voice-over-IP number or you're in a country that Twilio doesn't support but you're still an individual, single human being. So that's worth calling out: if you're having issues logging in, there is some help for that. In general, though, it should work fine.
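A lookup along the lines he describes might look roughly like the sketch below. This is an illustration using Twilio's Lookup API — the whitelist handling and rejection policy are assumptions about the app's behavior, not its actual code, and the real backend runs on Cloudflare Workers in JavaScript, so Python here is purely for readability:

```python
# Sketch: reject non-mobile numbers as an anti-Sybil check via Twilio Lookup.
# Credentials, the whitelist, and the exact policy are illustrative assumptions.
from twilio.rest import Client

client = Client("ACCOUNT_SID", "AUTH_TOKEN")  # placeholder credentials
WHITELIST = {"+15551234567"}                  # manually approved exceptions

def is_allowed(phone_number: str) -> bool:
    if phone_number in WHITELIST:
        return True  # support-approved VoIP or unsupported-region numbers
    info = client.lookups.phone_numbers(phone_number).fetch(type=["carrier"])
    # Twilio classifies numbers as 'mobile', 'landline', or 'voip'.
    return info.carrier is not None and info.carrier.get("type") == "mobile"
```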
So, I'm assuming these check buttons trigger off of RunKit — they actually look at RunKit to see if your account has performed the action + +[12:00] that is required — so I'm guessing that these buttons will, yes, throw some errors, because RunKit is down right now. If you just can't figure a challenge out and you want to move on — kind of forfeiting the cash prize, but still wanting to get the token — you can click the shrug button here, "I don't know," and it'll actually pop up a block of code straight from RunKit, which you can run. It'll have your public and private keys pre-populated in there, so that when you actually go to run that code, it will perform all the actions necessary to get your account to a passing state, where you could then check it and pass the challenge. Again, you wouldn't get the XLM — you wouldn't be eligible for the XLM prizes — but you could still be eligible for the badge for completing the challenge, and it'll let you move on to the next challenge. So, if you ever get stuck and you really just can't figure it out, and you don't feel like jumping into Discord or asking for help — which I really strongly suggest that you do; you know, try to get through it, that's the whole point — but if for some reason you're really stuck, + +[13:00] you can always just shrug out of it, run the code that it spits out, and then check it again, and it should let you pass. Once you've completed challenges, however, and you're moving right along: every so often, we run a check on the backend which compiles all of the people who have completed the challenge, runs them against our prize array to see who gets what based on when you completed the challenge, and then generates, basically, an ID which serves as a claimable prize. So, in this case, I've got one waiting here for challenge one, and it looks like I got first place in this — lucky me; localhost, you know, not the most popular domain name at the moment for everybody else to join. So I won first place, got 500 lumens for challenge one — see, again, this beautiful badge here — + +[14:00] and now I have the ability to claim this. And so this is going to trigger the functionality to allow me to actually create, or connect to, a public network key, and actually put this badge and 500 lumens into my account. We do put a nice little callout here noting that we're using Albedo. OrbitLens — he's the developer who built StellarExpert; good team — they built Albedo, which is — it's a plugin, but it's also just a pop-up service — a very low-friction, relatively low-friction crypto wallet account that you can use straight from a browser, both on
mobile and desktop — Safari, any browser will do — and that's what we're using to communicate between this website and the wallet. So it's our non-custodial + +[15:00] application that will be calling this account — kind of building that relationship of making sure individuals are owning their own accounts and understanding what they're doing, but also having the ease of communication between the browser and the actual wallet. So, I've got an account here that's been connected. It's on the testnet right now, but it recently got merged away, so we've got a little bit of activity on here, but it actually doesn't exist right now. So the first thing it will do will be to create this account and then claim those balances. And with Albedo, I'm using implicit mode — very similar to OAuth — where you don't have to continuously enter your password every single time you want to claim a prize. So it's possible that I've actually got a session variable — looks like I don't right now — so it'll pop a session variable in there when I run the implicit mode. When I first click claim, it should pop up that window to allow me to actually connect + +[16:00] and enter the password, and then that will go and claim that balance. Looks like, again, we have a RunKit issue, so it won't actually let me claim this balance, but the nice thing here is — assuming that error happened after — yep, we have that session. So, if I go and claim this again, it shouldn't do the pop-up; it should just try to claim that prize again. Because we have the RunKit issue, it won't actually let us claim it, but once that RunKit issue is cleared, it'll take this ID, go and see the public key that I've just sent along with the amount and the badge, and it'll create a transaction for me. Then it'll send it back for me to sign — the implicit Albedo session will sign that transaction — and then it will actually send it back to the Stellar Quest backend, and the Stellar Quest backend is going to wrap that whole signed transaction in a fee-bump transaction, + +[17:00] so that your users — or, I guess, Stellar Quest users — never have to pay fees themselves. I'll be consuming all of the fees, but I still get the really high throughput: if a lot of people are claiming prizes at the same time, I don't have to worry about the sequence number, because user accounts are consuming sequence numbers while my fee account is consuming all of the fees. So it's a nice use case of the fee bump that recently came out in Protocol 13 — it allows for a little bit higher throughput for claiming these prizes, without having to have users actually pay the fees to collect their prizes.
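The flow he's describing — the user signs the inner transaction, and the backend wraps it in a fee-bump so the service's account pays the fee — looks roughly like this with the Stellar SDK. This is a sketch using the Python SDK for illustration (the actual backend is JavaScript on Cloudflare Workers), and the keys are placeholders:

```python
# Sketch: wrap a user-signed transaction in a fee-bump transaction so the
# service's fee account pays the fee. Keys and the inner XDR are placeholders.
from stellar_sdk import (
    Keypair, Network, Server, TransactionBuilder, TransactionEnvelope,
)

server = Server("https://horizon.stellar.org")
fee_keypair = Keypair.from_secret("S...FEE_ACCOUNT_SECRET")  # placeholder

def bump_and_submit(signed_xdr_from_user: str) -> dict:
    # The signed transaction arriving from the client (e.g. via Albedo).
    inner_tx = TransactionEnvelope.from_xdr(
        signed_xdr_from_user, Network.PUBLIC_NETWORK_PASSPHRASE
    )
    # The fee account covers the fee; the user's account still supplies the
    # sequence number, so many claims can be processed concurrently.
    fee_bump = TransactionBuilder.build_fee_bump_transaction(
        fee_source=fee_keypair.public_key,
        base_fee=200,
        inner_transaction_envelope=inner_tx,
        network_passphrase=Network.PUBLIC_NETWORK_PASSPHRASE,
    )
    fee_bump.sign(fee_keypair)
    return server.submit_transaction(fee_bump)
```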
So, really, honestly, that's kind of it. These challenges will come out every Monday and Friday. Again, if we look back at our schedule, you can see Mondays and Fridays, 9 AM and 9 PM, going from challenges one and two, for four weeks, all the way through to November 6th. We'll be calling out things like the Meridian hackathon, which is open and + +[18:00] ready for registration, as well as things like the Stellar Community Fund — particularly the lab fund — so if folks are enjoying Stellar Quest and really getting into the challenges, there are places for them to go. Again, I'd really like to have partnerships with other developer groups; there might be opportunities for co-branded links, and really starting to open up the Stellar sphere to people that are not inside crypto. That's kind of the whole point of Stellar Quest: to really aim at normie devs that have not been involved in crypto before, but are interested, or at least willing to give it a try, so long as it's that low-barrier, gamified, entry-level experience, which is what we're trying to achieve, again, with Stellar Quest. So, with that, we will open it up for questions. Does anybody have questions about Stellar Quest? + +[19:00] I'll just wait — we've got our Anka team looking through to see if there are any questions. If there aren't — you know, if I explained everything so well, of course there are no questions — we'll wait a few minutes and see if anybody has anything to ask. So, one question you all should be asking is: why do we have to wait? Why can't we just claim prizes as soon as we finish a challenge? Why do you have to put it into a queue? That's an excellent question; glad you asked. It has to do with the fact that I really tried to use Cloudflare exclusively for the backend. We're actually using their key-value storage — database, if you will — but it has a potential latency of one minute. So, you can imagine: if people are completing challenges in one part of the world and people are + +[20:00] completing challenges in another part of the world, and that's not in sync, I could complete a challenge after you but claim the prize before you — and that's no good. So we have to wait until that potential latency has passed to allow everybody to claim the prize that they actually won, by looking at timestamps. Now, Cloudflare actually announced last week a more real-time sync they call Durable Objects — I think it's in beta right now — so once it reaches a stable release, that'll allow us to do claims in real time, so we won't have that intermittent waiting period between completing a challenge and claiming the prize for it. You'll know immediately, as soon as you finish, what place you got and whether or not you have an XLM prize associated with it. So, excellent question. Next: + +[21:00] another good one — interesting one — not really a question, but something that'll be quite fascinating: once Protocol 13 — I'm sorry, Protocol, wait, 14 — goes live (I think it's 14; I think the last one was 13; I'm a Stellar person, you'd think I could remember) with claimable balances and sponsored reserves, the whole process of claiming the prizes will be much easier. One of the issues right now with claiming prizes is that a badge is a custom asset that's stored on the blockchain, which means I have to open up a trustline for it. So there's a very real-time open-a-trustline-and-issue-an-asset dance that happens, where I have to kind of just sit there and wait until you're immediately asking, like, "hey, I'm ready to receive this prize." Once claimable balances exist, I can just say: oh, this person won this prize; I'm going to set up an entry on Stellar that says "here's the prize for this public key," and it's out there, which will allow you to claim it at any + +[22:00] point in time, from any place you have access to that public key. So, rather than having me and you both there at the same time to ask for, create the transaction for, and then receive the prize, those things can happen asynchronously, which will make the prize-claiming process much easier. As well as, you know, if you're eligible for the prize but for some reason you decide not to claim it, I can leave myself as a claimant on that claimable balance and say: after one day, or after a week, if you don't claim it, I'm going to revoke it and put that prize back into the pool for somebody else to grab. So, very excited about claimable balances.
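For reference, the claimable-balance flow he's anticipating might look something like this once Protocol 14 is live. This is a sketch with the Python SDK; the badge asset, keys, and one-week reclaim window are illustrative, not Stellar Quest's actual parameters:

```python
# Sketch: create a claimable balance a winner can claim later, with a
# predicate letting the issuer reclaim it after a week if unclaimed.
# Asset, keys, and the time window are illustrative placeholders.
from stellar_sdk import (
    Asset, Claimant, ClaimPredicate, Keypair, Network, Server,
    TransactionBuilder,
)

server = Server("https://horizon.stellar.org")
issuer = Keypair.from_secret("S...ISSUER_SECRET")  # placeholder
winner = "G...WINNER_PUBLIC_KEY"                   # placeholder
badge = Asset("SQ01", issuer.public_key)           # hypothetical badge asset

week = 7 * 24 * 60 * 60
claimants = [
    # The winner can claim any time within a week of creation.
    Claimant(winner, ClaimPredicate.predicate_before_relative_time(week)),
    # After that, the issuer can reclaim it for the prize pool.
    Claimant(
        issuer.public_key,
        ClaimPredicate.predicate_not(
            ClaimPredicate.predicate_before_relative_time(week)
        ),
    ),
]

account = server.load_account(issuer.public_key)
tx = (
    TransactionBuilder(
        account,
        network_passphrase=Network.PUBLIC_NETWORK_PASSPHRASE,
        base_fee=100,
    )
    .append_create_claimable_balance_op(badge, "1", claimants)
    .set_timeout(60)
    .build()
)
tx.sign(issuer)
server.submit_transaction(tx)
```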
And then, obviously, sponsored reserves will be really nice, because right now I'm actually creating accounts for people as they come in and attempt to claim their prizes. If their account doesn't exist on the ledger yet, I put down the minimum balance for the account for it to exist, as well + +[23:00] as covering any fees for the assets that they're trusting. So each of the badges has a 0.5-lumen reserve that's required for you to hold that asset, and I give you all those lumens right now, into your account. It'll be really nice once we have sponsored reserves, where I can just sponsor those for you rather than actually giving them to you. So both of those new features coming out in the next protocol release will be amazing and will greatly improve that, and, along with the Durable Objects from Cloudflare, they'll make Stellar Quest just a little bit faster and, honestly, easier to manage from my end on the Stellar side. So, very excited about those. It's been interesting, actually, building a production-level application on top of Stellar. It's like a meta-application, where you're both teaching people about Stellar and also using it to teach them, through the badges and the prizes, and + +[24:00] actually trying to get all these things to work well — where it's very high-friction to try to game it, but it's also really low-friction if you're just trying to use it. It's a lot of fun. It's quite challenging to build an application like this where it actually works, but it's been a ton of fun. Super excited to see what the future of it holds. Again, as you have feedback, be sure to leave it. Go through these challenges — the first one on the public network is launching this next Monday. So, all right, if there are no questions: thank you all for watching. This has been a lot of fun. Looking forward to actually launching this live. Make sure, again — mark calendars, get out there, and good luck. Bye.
diff --git a/meetings/2020-10-29.mdx b/meetings/2020-10-29.mdx new file mode 100644 index 0000000000..e195682899 --- /dev/null +++ b/meetings/2020-10-29.mdx @@ -0,0 +1,176 @@ +--- +title: "Custodial vs. Non-Custodial Apps: Which Side Are You On?" +description: "A roundtable discussion examining how Stellar applications manage users’ secret keys, comparing custodial and non-custodial approaches and exploring new protocol developments that could shape the future of key management." +authors: [justin-rice, lisa-nestor, nicolas-barry] +tags: + - community + - CAP-27 + - CAP-35 + - SEP-30 + - SEP-6 + - SEP-24 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +If you’re building a product on Stellar — or on any blockchain — one of the first and most consequential decisions you’ll face is how to manage users’ secret keys. Custodial solutions keep keys on behalf of users, while non-custodial solutions place keys directly in users’ hands. These two approaches are often presented as opposites, carrying different technical, regulatory, and philosophical implications. + +In this ecosystem roundtable, builders and Stellar ecosystem leaders debate which approach matters most right now. Drawing on real-world product experience, the panel discusses how custodial and non-custodial designs affect usability, compliance, recovery, and adoption, especially as Stellar applications reach beyond crypto-native users toward mainstream audiences. + +Panelists shared real-world experiences building wallets, anchors, and financial products for non-crypto users, highlighting how usability, recovery, compliance, and customer trust often matter more than ideological purity. The discussion also explored how newer protocol features and ecosystem standards could shift the balance by making both custodial and self-custodial designs easier to implement safely. + +### Key Topics + +- How custodial and non-custodial key management differ in practice and in philosophy +- Tradeoffs between user sovereignty, recovery, and regulatory responsibility +- Why enterprises increasingly adopt custodial designs as applications scale +- How CAP-27 (multiplexed accounts) could simplify custodial architectures +- How SEP-30 (recovery signer) could improve non-custodial key recovery +- Which key management approach the Stellar ecosystem should prioritize next + +### Resources + +- [CAP-27: First-class multiplexed accounts](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) +- [CAP-35: Asset clawback](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md) +- [SEP-30: Recovery Signer](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0030.md) +- [SEP-6: Deposit and withdrawal API](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0006.md) +- [SEP-24: Interactive deposit and withdrawal](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0024.md) + +
+ Video Transcript + +[01:00] The chat. Yeah, we're live. All right, hello. Hi everyone. It's a pleasure to have you with us today. My name is Lisa Nestor. I am a senior strategist at the Stellar Development Foundation, and you are joining us for our third roundtable discussion — the second topic that we've ever covered — and we are very happy to have you with us today. + +[02:00] So, I guess, to get started, I'd love to kind of frame again the purpose of these roundtable discussions. Hopefully we're going to have a lot of audience members and viewers joining in, and we'd love to invite you into the discussion as well. And then I'll jump into what the topic for today is. So, in general, at the Stellar Development Foundation, you know, we want to make sure that we're facilitating broad, inclusive, ecosystem-driven discussion around the types of tools that the Stellar ecosystem needs to continue to grow and to have successful use cases and businesses utilizing Stellar. And so, in that regard, we've started these enterprise roundtable discussions to invite key enterprises within the Stellar ecosystem to sit down at a table with various members of the Stellar Development Foundation to discuss topics that are really timely and important for the growth of the ecosystem right now. And, as I've mentioned, we also live- + +[03:00] stream these events and have a Q&A box for any viewers, so that we really can do our best to include all of the voices in the ecosystem. So, if you're watching us and you have comments or questions as we go through this discussion, please drop them in the chat box, and we'll do our best to respond and include those perspectives. So, for the discussion today, we're going to be talking about custodial versus non-custodial applications in the Stellar ecosystem. And so why is this an important topic? Well, the truth is that if you're building a product on Stellar — or any blockchain network, really — one of the first things you have to figure out is how to manage your users' secret keys. There are two fundamentally different approaches that are, seemingly, practically and philosophically at odds: custodial solutions keep keys on behalf of users, and non-custodial solutions put keys into users' hands. And I think + +[04:00] most of us would agree that during the early days of Stellar, and also within the broader crypto landscape, non-custodial wallet designs were by far the default. In my opinion, this is because the novelty of being your own bank and having direct access to digital assets drove forward applications and wallets that were primarily non-custodial in nature. However, as the Stellar ecosystem has grown over the last few years, we have seen more custodial design approaches by enterprises in our ecosystem. So that's why we're opening up this discussion today: because we've seen a lot of growth in applications on Stellar that are getting more and more users, we think it's a great time to check in on how wallets are currently set up, the experiences people are having, and how they're thinking about moving forward. And then, also, we have at least two specific technical developments within the Stellar ecosystem that are worth checking in on, as + +[05:00] they're related to the specific topic at hand. So one is [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md). [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) allows first-class multiplexed accounts. This was included in Protocol 13, and it basically allows services to map a single Stellar account to multiple users, which could make custodial solutions a lot easier to build. On the other hand, we also have SEP-30, which is a recovery signer, and this defines an API that enables a user to regain access to a Stellar account after losing a private key, without giving a third party control of the account. We're actually really lucky to have the key architect for SEP-30, Lee from the SDF, joining us today, and this solution in particular is something that could make non-custodial solutions a bit easier.
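To make the CAP-27 idea concrete: a custodial service can derive a distinct multiplexed "M..." address for each user on top of one pooled "G..." account. A minimal sketch with the Python Stellar SDK, where the pooled account and user IDs are illustrative placeholders:

```python
# Sketch: map many users onto one pooled custodial account via CAP-27
# multiplexed accounts. The pooled account and user IDs are placeholders.
from stellar_sdk import MuxedAccount

POOLED_ACCOUNT = "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ"

def address_for_user(user_id: int) -> str:
    """Return the M... address identifying this user within the pool."""
    return MuxedAccount(POOLED_ACCOUNT, user_id).account_muxed

# Each user gets a unique deposit address backed by the same G... account,
# so incoming payments can be attributed without per-user memos.
print(address_for_user(1))  # M... address for user 1
print(address_for_user(2))  # M... address for user 2
```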
So, kind of checking in on where things are at, and also thinking about these two new technical developments — this is going to be the focus of our discussion today on custodial versus + +[06:00] non-custodial solutions. So, with that, I would love to get started, and I would love to start by having our fantastic guests join us. I will call you out, just because we're on screen and not in person. So, if I can invite Marco to start us off. Hello, thanks for having us here. My name is Marco. I am one of the co-founders at Saldo.mx, an anchor service for the Mexican peso. Wonderful, thanks, Marco. All right, how about Meinhard next? Yes, so, yeah, my name is Meinhard, and I'm the CEO and founder of SatoshiPay, and recently we launched DTransfer, which is a B2B cross-border money transfer service, and that's kind of the main focus of what we're doing right now. Awesome. And actually — I forgot, I said that I was going to ask a special surprise question. So we'll come back to + +[07:00] you, Marco, and this goes straight to you, Meinhard: any new quarantine hobbies that you've picked up over the last nine months? We'll say 2020 hobbies. Not strictly a new hobby, but I meditate every day, so that's going to be something I picked up. Okay, yeah, awesome. Marco? Well, this is exactly — I mean, not a hobby either, but I've been, like, learning a lot of cooking these months. So, yeah, and I enjoy it, so, happy — hopefully other people are enjoying it too. That's awesome, that's great. Cool, and then I will turn it over to you, Christian. Yes, thanks for having me. I'm Christian, the CTO and co-founder of DSTOQ, so we do tokenized securities — such as Apple and Tesla — and make them tradeable on the DEX. And, + +[08:00] yeah, we're live since, I think, two or three months, and have looked deep into custodial solutions quite a lot lately. Great. And any quarantine hobbies that you picked up? Not really hobbies, actually. So, actually, the thing is, in Germany there was a lockdown of schools as well. So, after doing a lot of cryptographic math in the last year, I was doing a lot of primary school math this year, when I had to teach my daughter. So that has become my hobby. That's fair enough, that's good. All right, and so next I'll hand it over to Justin from the SDF. Hi, I'm Justin, head of ecosystem at the Stellar Development Foundation — excited to be here, obviously. Well, my team, we care a lot about what the ecosystem is doing, and whenever there are improvements or changes, part of what we try to find out is whether they work for the ecosystem and also how to get them out there, so that everybody's using them in a way that makes things interoperable. So, very interested to hear people's feedback. My quarantine hobby so far is — that's + +[09:00] an important hobby.
I hope everyone checks it out. Any homeowner should make sure to develop that hobby. I live in an old house and it turns out there was a lot of asbestos, and okay, that just becomes a consuming thing. It can, yeah, absolutely, okay. And then, last but not least, Leigh. Yeah, hi, I'm Leigh McCulloch. I'm a software engineer at the Stellar Development Foundation and I contribute to SEPs like SEP-30. And yeah, I'm just really interested in custodial and non-custodial solutions and how we make them interoperable. Well, sounds like you're in the right place, Leigh. But you don't get away without letting us know if you have any quarantine hobbies as well. Similar to Christian, I definitely

[10:00] became a teacher as well. Okay, those are skills that will pay off, though, I'm sure. So that's great. I myself picked up skateboarding, and this thing called a RipStik, a little bit- that's kind of like a- cool. That was, you know, a good way to- when maybe there's no commute, it was a nice way to spend the afternoons after work. So, okay, great. Well, I would love to dive in, I guess, straight into the meat of it, which is: I'd love to hear what each of you are doing for your current custodial strategy, with DSTOQ and Saldo and SatoshiPay, and, you know, maybe a little bit about why you chose that approach and how you're thinking about things now moving forward. So, Meinhard, perhaps we'll start with you? Sure. So at SatoshiPay, we essentially have three major services. One is the micropayment service that we're known for- micropayments for digital content. Then there's obviously the Solar wallet, an open-

[11:00] source Stellar wallet, and then recently there's DTransfer, and all of them use a wallet solution that's self-custodial. And I'm saying self-custodial here because internally we use that word instead of non-custodial, because we think non-custodial is a bit misleading- there is a custodian, which is yourself. And so we chose to be self-custodial essentially just to have everything be software, and all the trust is in software and not in any external party, not even us. This helps us to move further, or to release products to the market much quicker, and it also has removed a huge overhead in potential regulation or regulatory requirements that we have to fulfill. So we can just push out the software and sort of have people manage their own private keys, and therefore we're really just a software provider in that sense. And that being said, for DTransfer, we

[12:00] recently also released a self-custodial solution- on Monday actually, to production- and here we really give users the full control over the funds, and also the reason, again, is a regulatory one. However, in the future we plan to give users a choice, maybe, that they can either have a custodial solution by us, or by some other external custodial party that we partner with, or even combine both approaches. That's super interesting. I mean, is that something that would be a relatively easy opt-in decision to kind of automate from a back-end perspective, or would these need to be two totally separate kind of infrastructure solutions? So yeah, the combination would be very separate- or not very, but pretty separate- legal, let's say, connections that we have with our users, and also technically it would be quite different. Yeah, so that's

[13:00] also where the different CAPs and SEPs come into play, each on their side.
So it's going to be quite some development work and also some, let's say, legal groundwork that we have to lay first. What is motivating you to consider adding this second solution where you provide custody? Mainly user experience, because a lot of the customers we have are not familiar with this sort of power that self-custody has- so also the power to lose your money, essentially. And that is something where we want to maybe give users the choice, if they, you know, want to be in full control, or if they trust us with a certain amount of their money, or even all of their money- like a more bank-like service. It's also- right now we haven't found a custody partner yet, and also we're not regulated in that way. So we can't actually legally launch a

[14:00] self- or, sorry, a custodial solution at this point, but we can launch the self-custodial one, and that's why we moved forward with that. We can now handle bigger transactions as well- as long as the digits fit in, let's say, a Stellar amount, we can allow even very large amounts if we want. That's great. I really want to dive into some of those points more specifically around, you know, the usability side of things, and I'm also kind of curious to hear, for the users that you think would want a non-custodial solution, kind of what is a driver or motivator there as well. But maybe let's go ahead and jump to Marco to get a little bit more of our landscape about where you're currently sitting. Yeah, thanks. Well, philosophically, I'm personally very aligned

[15:00] with the self-custody movement that, you know, cryptocurrency has started, and I don't see, for example, in single-asset blockchains such as Bitcoin, really the need of thinking through custodial solutions. But I've seen Stellar since the very beginning seeing and realizing the market opportunity of representing real-world assets on this public network and allowing exchanges and transfers, and that requires a lot of companies like ours- companies that are regulated entities capable of holding value on behalf of their users- to participate. And the needs that these companies have are actually very different, because,

[16:00] like, they want to leverage this ability to hold people's funds and be able to, like, improve usability as much as possible and remove any friction that can be created by the self-custody approach, which we all know very well. And I think what we've seen under development, especially, you know, from the ecosystem of standards, can help expand those custodial solutions, and I think that's something that we should think through more and focus on, because, the same way that cryptocurrency has been expanding,

[17:00] thanks in big part to exchanges, which are custodial solutions, I think that, at the end of the day, it will be extremely beneficial for all if more custodial solutions are able to adopt the Stellar protocol in a way that makes sense for them and, in that way, increase the use of the whole network and benefit everyone, including, like, those players that are using self-custody solutions. Yeah, I mean, I think that this is, you know- I think there's kind of a philosophical point of tension here,
where, you know, I think many of the people within the Stellar community really like the financial sovereignty that blockchain can provide, but there's also typically a deep desire to create impact- to use a platform like Stellar to provide

[18:00] more access to financial products and services for everyone, right? And so how do we do that in really simple ways? Or how do we do that in ways that work across user types, outside of just people who really understand cryptocurrency? So- but, Marco, can you help clarify: how does Saldo actually work? How are you set up right now? Yeah, so we as an anchor support both approaches. So if someone wants to, you know, utilize our services through a non-custodial solution like Lobstr, they can go ahead and do it, but we're also maintaining a service that's called SMX, which is our custodial solution, that we believe is really important for the business development of our company. So that, basically, is a very easy way to

[19:00] use a wallet that allows especially Mexicans who are in the US, working hard and sending money over, to hold a little bit of Mexican pesos in a digital way, and that replaces the need to go and cross the border and open a bank account, which is almost impossible for the majority of them. So, like, we are really focusing on this community, which is not tech-savvy, is not a part of the crypto community at all- and we've been, like, super motivated from what we've seen. And this is a community that actually could not care less about, like, the self-sovereign aspect of crypto; they just want, like, the task to be done. And so we've been developing this with these people in mind, and that's, for our company, a big focus right now, and we also want to make it compatible with any other Stellar-based solution. But in

[20:00] our approach, this is a fully custodial way of using our anchor. Gotcha, and it sounds like you already have some thoughts on where there could be more support within the Stellar ecosystem for being able to kind of launch these types of products. Yeah, I think that, you know, there are already a lot of interesting discussions, like the muxed, like multiplexed, accounts that you mentioned. And I'm sure we will talk more about this later on, but I think that it's, in general, very interesting just to kind of, like, see what the users are doing, and just, like, see that, basically, for the entire world it's gonna take a long time for the majority of people to adopt,

[21:00] like, the self-custody tools out there. And I think that we should, like, be kind of aware that there's a path towards that kind of decentralization, and, in the meantime, it's really important to serve the people that want to be served in a specific way, and for us, like, we are at a stage in which it is really important to provide custodial and easy-to-use solutions like this. Great, yeah, and I think we definitely will dive into some of these various things, like multiplexed accounts, more. So great, thank you so much, Marco. And Christian, I know you guys have had an interesting journey with these topics, so we'd love to hear about what you guys are using, how you set things up, and how you're thinking about the future. Yeah, I already have a lot of points I want to touch on. So, yeah,

[22:00] maybe I can start with:
There are actually two different ways- or multiple ways, actually- that you can use DSTOQ's technologies. So, first of all, there's an API layer, which basically everyone can use. It uses Stellar standards, and people can build products on top of it, and for that one, we actually don't have any opinion on how the user protects his or her keys. So we just basically verify the user and where he's coming from- that he's not, like, a terrorist, something like that- and then they can use their account to trade our assets. But as well- and that's actually our main product- we have a mobile app, and, because we serve emerging markets, there's a mobile-first approach, and this app is actually not targeting crypto users. So if you're a Stellar heavy user and you want to use DSTOQ assets, you can use, for example, the Lobstr wallet or something like that. But for non-crypto users, who are new to investing and haven't had touch points at all with

[23:00] crypto in the past, it's actually quite a hard concept to grasp: to create a private key. So, like, currently in our onboarding process, which is self-custodial- I'm going to start to use this word as well- the people actually have to create a private key, and we see actually some kind of a drop-off rate right at that point. So, like, people have a hard time doing that. So I understand that they're a bit scared; maybe they don't understand it, and things like that. So that's a problem, actually, to us, and I think to everyone who wants to build an end-user product that's not just targeting early adopters. And the tricky part on this one is, like: at the very heart of DSTOQ is that DSTOQ itself does not hold customers' funds. So that's the basic idea of how we can actually pull off the entire business, and that's true for most crypto companies, I guess- that they don't hold customers' funds and are

[24:00] therefore eligible to cut off a lot of middlemen that are normally required in financial transactions. And yeah, so basically, if we want to go away from that- and we're actually planning to do that- we want to find a solution that makes it easier for end users. We have to go some way that's not only self-custodial. And- well, let me phrase it differently. The first thing that we need to define, or that we have to talk about, is: what does a custodial solution actually mean in this case? And this definition isn't actually as clear as one would guess. So we're always talking about, okay, there are self-custodial solutions and custodial solutions, but the definition of custodial is quite a bit more complex. I've read into it a bit. For example, BaFin, which is the German equivalent of, roughly, the SEC in America, defines custody as someone who's holding funds on behalf of other people, is able to transfer them, make payments with them, and all this kind of stuff, and

[25:00] who holds a private key that's eligible to do that. And, for example, in SEP-30, where you say, okay, we're splitting off weights between third parties that need to jointly recover a key for a user- that doesn't really meet that definition. So it's not really clear that that's actually a custodial solution that would require regulation.
So, like, by the very definition of financial regulators, it may not be- but it's actually not that clear, because it's just basically new land. So nobody has actually challenged that; maybe the regulators weren't even aware that there are weighted keys, things like that. So the entire field is- it's not as easy as it gets. So for us it's, like, very crucial to find a solution where we say, okay, people will be able to recover the key, but I wouldn't call that a custodial solution, because the people do not hold the funds of the user in custody; they're not able to transfer

[26:00] them, do stuff with them, and, more importantly, they don't have to do insurance and things like that for it. So, yeah, that's the basic thing. So custodial solutions versus non-custodial- it's not the only distinction; in the real world it's a mix as well- it's mainly, for us, about key recovery that we want to try to solve. That's great. I mean, I guess I'm curious: do you, Meinhard or Marco, have kind of a similar opinion or reaction? Extremely relevant and interesting, because, for example, I think that, for, you know, a lot of regulators, the fact that a user of a regulated entity actually goes and does something and loses the private key is not sufficient for the regulator to

[27:00] just define away the legal obligation. You know, the company still has a valid payment obligation that it needs to honor. So I think it's not sufficient- like, the key management will not remove, or will not make go away, that legal obligation that some custodial companies have, and that's something that's very valid across, like, all the kinds of real-world assets, including currencies. And I think, you know, at that point you realize that somehow, for example, if a user of our anchor loses the private key and you still see the funds, you at least need to show that you have a process to be

[28:00] able to, in a way, cancel those funds, or do something with that account, and then, like, credit back those tokens to a new key- re-keying, kind of, you know, doing those things. It helps companies like ours to comply with local regulations that are not aware of what private keys can do, and I think it's, like, super interesting, and makes companies like ours realize that it's a very complex topic. That is an interesting point you make. Oh, you go first. Okay, then- sorry for that. That's an interesting point to raise. I mean, when you're anchoring an asset,

[29:00] you always have the option to freeze it, and, okay, it's still on chain and it's a bit harder to keep track of it, but it's still- in an automated way, you can prove that these assets are frozen when you, for example, kind of take away trust lines, and then you can issue the tokens, the assets, anew. It's somewhat of a cumbersome way- it may be easier in the future, because there are some proposed changes to Stellar to reclaim assets- but this is some way for us, for example, to recover funds at the moment, even if people lose their key in their self-custodial solution. The problem is actually not that- so we can always do that. The problem is merely if the user just, for example, deposits a million in XLM and then loses his or her key, or the same for a stablecoin.
So we may not be able to apply this process there. But the interesting idea of just freezing the assets and reissuing them to new accounts is always an option as well. That's so

[30:00] you don't have to- you just drop the old account when you lose the key. Just on that briefly: obviously you need to flag the asset as being revocable, and, yeah, there are asset types- there are configurations- where you don't have that. So if you forgot to do that, or if you didn't plan for that, then obviously that's not possible either. So what we've done in our micropayments product- actually we used a little bit of a trick there. We are co-signing all the transactions that are being made from the micropayment wallets, and we also have a time-locked transaction that we issue with each payment, meaning that after a certain period of time, the funds will return back to us if no other transactions have been made- meaning that if the user loses a key and they flag us, they tell us that they did, we will simply not co-sign any transactions from that key anymore. And then after

[31:00] half a year- I think that's currently the time that we have programmed- we can then recover these funds. That means we can even issue lumens to the users- we kind of give out a bit of our liquidity beforehand- but in that way we can also sort of have account recovery for accounts that actually are lost, and the funds will come back to us later, after half a year. Sort of a workaround for that issue. But isn't co-signing the definition of it not being purely self-custodial? Depends on the type of weight a key has. So if you have, you know, like, a minority weight- and then, so I don't know how the regulator sees different types of weighted signatures, because that's also depending on the chain, obviously. Well, it's a hybrid, I would say. A

[32:00] hybrid, I would say, yeah. And I think, you know, just to kind of respond here- so, one, I wanted to flag: Christian had mentioned a CAP on Stellar which I wanted to name, just if anybody's watching- yeah, [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md), which is the asset clawback CAP, which is a new proposal, so if anyone's interested in exploring that further. But, you know, this is really interesting, because it still feels like we're dancing around these areas of kind of, how can you stay lean- and, you know, I would say, kind of do things that allow you as an organization to have a leaner regulatory framework- along with things that are more tied to kind of, like, usability, and how do we make

[33:00] experiences for consumers that can keep their information safe but still provide kind of, like, non-confusing, not overburdensome experiences in the product. I mean, do you guys see any other major areas that are kind of- I'll say kind of, like, dancing around the surface here? Because I know we're kind of talking back and forth about various designs, but it feels like there are kind of larger themes or larger questions. So maybe I can start, because, I mean, we've been also thinking about this. The problem is that- honestly, it is true that it removes liability, the fact that you can actually delegate the key management to your users, and that, like, fundamentally will remove, like,

[34:00] regulatory load. The problem is that, if you see what happens in fintech- okay, regulation is more expensive than software, sure, but is it at the top of the list? I don't think so.
It's like: you have software, which is cheap, and then you have regulation, which is way more expensive, and then you have distribution. So if you look at what the big expenses are that companies have, it's mostly distribution. Customer acquisition cost is what sometimes kills, like, the unit economics. So it's not regulation at all, even if it's, like, super expensive. So I think that a lot of companies realize that, okay, yeah, there are some efficiencies to be made in that, you know, liability that goes away, but it's not the entire story. You still will need to go and find the users, and that's, like,

[35:00] extremely expensive, and then sometimes it's even more expensive if you need to make them learn what, like, a 12-word passphrase means, or, like, a mechanism- that it's not intuitive- to recover, like, the secret key for your funds. So those things will add costs to that very, like, heavy cost of customer acquisition, or will reduce the universe of people that you can target. And I think that's something that, from the business perspective, is not very positive. So I have a question, which- and this sort of relates specifically to SEP-30, which I think is a very clever way to give users access to their account

[36:00] without ever having them touch a password, or know a secret key, or use a backup recovery phrase. They can basically log into a Stellar account using just their phone number or their email address- some proof of identity, right? So with a solution like that- you know, well, I guess there's two questions. One: does a solution like that solve the user experience and allow you to create a non-custodial sort of interface or solution, one that sort of obfuscates the crypto nature of an app from a user? Like, basically, can using something like SEP-30 make it so that the user experience for a non-custodial wallet is just like a custodial wallet one? And two: if it does, does it solve the biggest problems, or does it only solve some of the problems, and there are still a lot of problems left over that would still make you choose to use a custodial solution? I mean, maybe I can just quickly

[37:00] say that I think it's the final user who will judge those things, because, at the end of the day- for me, it's very intuitive to use a memo ID just to identify a final user; that actually hasn't been the case for the majority of users of exchanges. So, in a way, like, something that feels very intuitive from the design point of view- and this is true across many different SEPs- for me makes a lot of sense, but once they're implemented, things become trickier. And I think that's the only thing that I could say- probably Christian can say much more. Yeah, well, I think- well, first of all, I like the basic idea of SEP-30, and it's definitely a valid one. We do have some problems with it when we want to implement it. There's one thing that's very common when we implement SEPs- ecosystem proposals, for those who don't know: our standards in the Stellar ecosystem that anchors, or people building on

[38:00] it, can implement, and then everyone can use them, and it's interoperable between different clients- and that is a good thing if you're a client. For example, there are SEP-6 and SEP-24 for deposits and withdrawals from anchors, and it's super nice.
So basically, as a developer, I can go ahead and integrate ten of them in a day, and that's super nice for me as a client developer. But, to be honest, it's not something I care about as a developer. What I care about is optimization of the user flow. So, for example, deposits- that's the most crucial thing in our flow ever, and if I can increase conversion there by one percent, or by ten percent, then I would do that. So I'm fundamentally not interested in getting this thing done fast; I want to get it done right, and currently I can't do A/B testing, I can't do client-side validations, with SEP deposits and withdrawals, just as an example. So the SEP integrations as a whole focus on clients getting the job

[39:00] done, and that's not what we want to do. And SEP-30 is kind of the same thing: it automates the entire flow for clients, so that we can just do it and then interoperate a lot with a lot of other clients who integrate SEP-30, but it's not coming from the perspective of the end user. So, for example- I'll give you an example: quite recently I lost my two-factor authentication- I think it was Coinbase- and it wasn't an automated process. I had to send in my passport; there was a person talking to me, and that's fine. It's something I would expect as a user, and it encouraged me that those people do their business right. Whereas with SEP-30, if the flow is, like, okay, my email, my SMS- that's something I wouldn't fundamentally like as a user: that I'm just a tap away from losing my entire life savings when someone captures my email. And my wife quite recently changed her phone number; she doesn't even have access to her old number anymore. So it's kind of a weak thing for us, and

[40:00] it focuses a lot on being automated- people don't have to hassle with it, it's fully integrated, it's decentralized- and that's fundamentally not anything our users care about. What they basically care about: you just want to log into the app, have an account, and never care about it. And for us it would be totally fine if there would be just a person having an HD wallet in the physical world, and it costs, like, thirty dollars, or ten dollars, or twenty dollars, for key recovery- because if it's below that, it doesn't make sense anyways; if it's above, fine, or we could scale it. But having an approach that's just focusing on the client side wouldn't cut it for our users. So we would have to have a process that's more tied to the user experience, and the user doesn't want to care about that at all. I think that's it from my side. I talked a lot, so maybe I'll keep it shut now. Do you think that there's- I think we've mentioned, in a few points, like, there's this issue

[41:00] of keys being lost, and then what do users go through to get their keys back? And then we're talking separately about the products we're developing and all the functionality they provide. Do you think that there's room for custody as a product, independent of the products you're developing?
So maybe I can take this one. Definitely- I don't know if that was directed to Christian, but definitely, for us, that would be something we desperately want in the ecosystem, and it would have to be a custodian that supports different Stellar assets as well. There are wallets that support lumens already, and custodians that support lumens, but to extend that to other assets, and maybe even trades to some extent- that would be definitely something we would want to integrate, and that's actually what we're actively looking for. I would want to make, again, the separation between custody and key recovery.

[42:00] So we don't want to have custody- traditional custody solutions work like this: they don't expose a private key to anyone, not even the user it belongs to, and you send transactions to some kind of signing service to get them signed. So we don't want that. We always want to have self-custody, and the user be in charge of things, with possession of his or her own key. What we want on top are key recovery solutions, pretty much like SEP-30 would offer conceptually, with multiple recovery parties as weights- where each weight does not have the option to trade. But I understand where it was coming from; it was developed with a different thing in mind. It was basically developed for the use case of not only key recovery; it was developed for sharing keys as well, and all kinds of use cases that are required alongside, when you're not the only one having access to that key, and for that, SEP-

[43:00] 30 does a totally perfect job, I think. It's just that, for our purposes, we don't want all of those features. We just basically want that, in the very seldom case someone loses his or her key, they can recover it. And for that, I think it would be fine if there would be just, like, two or three, maybe, notaries, or trust companies, banks, whatever, who would provide the service- maybe just a call center or something like that where people could provide their IDs and initiate a recovery process. So for our users, it doesn't have to be this decentralized solution, because most of the users are not crypto maximalists who say everything needs to be decentralized and all these kinds of things. One thing, if I could make a proposal that's a bit more technically involved: it's fundamental to Stellar to have counterparty risk, right? So you have to establish trust lines to assets, or anchors, where you say, okay, I trust those people. So maybe for this you say: hello, these

[44:00] people are sitting in Europe, they are attested by the regulators there, that's a bank in Europe that holds the assets in custody- so that's great, I trust those guys, I establish a trust line to DSTOQ assets. And maybe that would be true for custodians as well, where you can say, hey, I'm establishing some kind of custodial trust line, where I say, hey, this guy- or girl, or entity- can hold a specific weight of my assets, so that you have it somehow baked into the protocol level. But that's just for the far future. For the very near future, it would be just basically fine to have two or three who establish trust by being used by many of the enterprises building on Stellar. Marco and Meinhard, I'm interested if that idea appeals to the two of you also. Which idea- that?
Basically the idea of there being sort

[45:00] of trusted, professional custodians out there that you could- I think, in general, it fulfills a need. I've always seen Stellar as a very diverse ecosystem, and I think that the problem of, like, discussing topics of one solution versus the other is, like- it's not giving the full picture of what's happening. So, on Stellar, for example- the way I see it is that, at least in the ecosystem we touch, there are multiple different roles, and, like, some of them will require something like that, and some of them won't. So, for example, like, we're thinking: imagine a migrant worker that has the need to send over 200 dollars. That's not the same need as the market maker that facilitates, like, the exchange rates that those other people benefit from, who actually moves millions of dollars. So those are completely different needs, and they're both using Stellar. So

[46:00] I think, in one case, shared keys are badly needed, and in other cases, what some other projects are doing, which is, like, completely abstracting it away. And, you know- why would a company like ours have the liability of the people's funds? Because, like, there aren't a lot of funds- maybe you actually can, because you're solvent; probably you have, like, you know, capital requirements that exceed those people's funds. So, like, those things will, at the end of the day, mitigate the risks and will make you capable of fronting such risks, and I think that's what I see. So, in my mind, there are a lot of, like, sophisticated players in the Stellar ecosystem already, in a way, that understand, like, how the mechanics work, but there aren't yet as many users as I would like to see. I'd love to see every Mexican who's working

[47:00] in a kitchen in New York using a system that touches Stellar, and when you do that, it's, in my mind, a product that completely looks like something that this community would expect, and that's completely far from something like SEP-30 or something like that, because they don't want to deal with that. They just want, like, to deal with the same thing that they've been doing with other companies when things fail, and that's, like, the market need that we've seen. Yeah, absolutely, and, I mean, just one point that you made there that I do think is really important is that, you know, even when we're thinking about the requirements for just an application, oftentimes there's a whole kind of broader ecosystem stack that's required to make that end product exist, and everybody who's in that value chain has kind of differing needs based on what they're doing. So, yeah, I think that's fair. You know, I'd say- well, we're

[48:00] in our last 15 minutes here, and so, you know, I think we're already kind of doing this, but I would love to drive the discussion towards kind of identifying gaps or needs- or we could also put these as opportunities- within the ecosystem. And so maybe I'll just phrase this in a slightly constraining manner to kick things off: if you, Marco, Meinhard and Christian, could wave a magic wand and have, like, one new thing exist in the Stellar ecosystem to support your vision of key management, what would that look like? What's the number one thing that you think would be super valuable for you? Whoever has a response to that. I can start with the already-mentioned multiplexed accounts that we've touched on before. It's, in general-
I think, that's

[49:00] something that, in my mind, was missing, and I would love to see it completely adopted. And the reason is because of the way that I feel, as a user, like, the way that memo IDs are used- it's kind of not intuitive, and it feels like a hack. You know, like, everyone's familiar with a payment app where you can also include a message that travels with a payment. But in this specific case, we're, like, hacking that field, that memo field, to actually embed information that will make the payment route differently. So that became, like, the default way of adopting Stellar for exchanges, and I think that, you know, the entire ecosystem accepted that was the way to do it. But

[50:00] I think if there's a better way to do it- which in my mind is multiplexed accounts- we should, the sooner the better, adopt something like that, because you can actually free up that field, which is important for other use cases. As an example, the first implementation we did for bill pay actually encoded information about the bill- like the reference of the invoice that was intended to be paid- and that was encrypted and sent over the memo. So, like, to me, those are things that other players may find, like, useful and interesting and will design that way. And if we use that field as just an additional field that, together with the address, will actually route the payment to a different, like, destination- that's completely, in my mind, something that shouldn't work that way. And,

[51:00] you know, some evidence is, like, people make a lot of mistakes when they're, like, putting assets in and out of exchanges. And I think it feels like it would be focusing on people and companies using custodial wallets, but I think it's the opposite: if it's widely adopted, it's something that would be there, available for everybody, and it will, like, just add a more intuitive way to just send payments to an address with an amount, and that will, like, satisfy the need of, like, the routing and all the information that needs to travel with each payment. So, in my mind, that's something that needs to be adopted- obviously discussed first- and for me, that would be it. So you're putting your flag down on the CAP-

[52:00] 27 planet in the Stellar ecosystem- I don't know where I'm going with this, but you're squarely on the [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) multiplexed account team. Yeah, I think so. Okay, great, wonderful. Meinhard, looks like you maybe have a thing next. Great, yeah. So also, [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) is really interesting for us, and that's something- unfortunately I can't answer it- maybe Leigh can clarify: does it also sort of protect the privacy of the recipients, or is it still easy to, like, deduce the address that's behind it? Because that is something we would also wish to have- that we don't have the complete balance of all of our clients immediately visible to, maybe, a sender- and so, in a way, not anonymous, but partially anonymous transactions, at least for the recipient. That would be quite interesting for us. And then also something that Christian said earlier, like,

[53:00] a sort of a federated approach for key recovery, or even for a custodial solution- that would be definitely quite interesting.
And then- you said I should pick one thing, but maybe I can also say a third thing. It would be sort of regulated entities that are offering this custody as a service, and not just key recovery. That's definitely something that would solve a lot of problems for us right now, because fundamentally we're not really- also because of our consumers' perspective, our users' perspective- maximalists, in the sense of 'you have to own your key'. Ultimately, we want to offer a transfer service that passes on all the advantages of blockchain, or Stellar in this case- mainly speed and transaction cost, and also the transparency- and it doesn't have to be fully self-custodial in our sense. Like, we want to give the user

[54:00] the choice, and the user doesn't really care much about that. They should have an online-banking-like experience, without the need to know what's behind it, and without the need to learn new vocabulary and new approaches. So that's, like, how we want to place our service. That makes sense, yeah. I concur that, you know, it seems like, along with there being some kind of technical aspects to creating more capabilities around key management, there are also potentially ecosystem partners that we could bring in that would really support this as well. And then, Leigh, do you know, on [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md), if it adds any anonymity? Or I'm not sure if that's something you're too up-to-date on. So, yeah, I can answer that: not specifically. So there's not, like, a feature of [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) that would make it any more anonymous

[55:00] than using a memo ID for a transaction. However, you know, the service that's using [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md), you know, could rotate IDs for users, so they could change the ID that a user is using over time, or use some other strategies like that. That obviously has some disadvantages: if somebody is treating a [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) multiplexed address as, you know, an actual address for that user, and it's changing, that could be problematic. But I feel like this is similar to virtual credit card numbers, where you have, like, a credit card, and then you have, like, one-off ones you use for different merchants, maybe, or different people, things like that. Okay, great, thank you, Leigh. And then Christian: yeah, well, there's short-term and there's long-term. So basically,

[56:00] we're fundamentally focusing on non-crypto users, and for non-crypto users, convenience is important. So there's a reason why things like WhatsApp are more popular than things like eBay- because they are just more convenient, and users always choose the more convenient solution. But I think that, fundamentally, key recovery should be easy; the recovery process itself can be hard. If I lose, like, my physical wallet, with my credit cards in it, I'm up for that- it will be a complicated process to get that all sorted, so that's fine. But setting up my wallet and setting up my key- that should be the easy part. And for us, like, the easiest solution would be just: hey, on account creation, when you create your account on DSTOQ, we would just add two or three trusted and reputable public keys, and those people do have some kind of recovery process.
If I weren't totally filled with work at DSTOQ, I would

[57:00] right now found a company, start a startup, doing exactly that, because there's a need for that. And, like, in the long term, I could see that embedded in the protocol. The one thing I said was: having trust lines to these custodians- pretty much like you have trust lines to assets or anchors- would be a nice way. Maybe one could even think about, when you have an account and it holds assets from multiple different anchors, that you can say those anchors could jointly recover your key, depending on the weights and how many assets you have there; as well, asset clawback with [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md) could do a great job in that. But, like, it would be awesome to have it embedded in the protocol- but for our needs it wouldn't even be required. The short-term solution would be just, like: hey, let's get two or three trustable notaries, solicitors, something like that, out there that we all trust- and it doesn't cost a lot of money, so it could be, like, small no-

[58:00] taries that are just regulated in some country- and you just say: put them up, put a process behind it so that you can recover, and then I think it would be fine. Question- just as a quick clarification: I'm not sure I can actually conceptualize what this would look like if it was embedded in the protocol, and I know it's, like, very early stages, high level, but, I mean, could you maybe illustrate that a little bit more- what you think that would look like? Basically- so the basic idea currently is that you have public keys as signers, and what you could do is, when you say, hey, we create trust lines- say, a trust line to a custodian- then these custodians could jointly recover your key, even though they don't have weights on your account. So, basically, these anchors would have a public-private key pair somewhere on the chain, and this key pair would have the right to recover addresses. They would have been added as a

[59:00] signer, but you don't have to explicitly add them as a signer. So basically you create a trust line to three custodians, and then these three custodians could jointly recover your funds, but you don't have to add them as a signer- so you don't have this kind of multi-signature thing added to your account. Gotcha, okay, that's really useful, great. Well, this has been a super interesting and useful discussion, and I have to say- and I'm sure Justin and Leigh feel the same- you know, there's been a lot to take in and digest. I really appreciate you guys sharing your perspectives and opinions on this, and I have a feeling we're kind of just kicking this discussion off. So we did have one or two questions from the audience, which I will get up in front of me in just a second- oh, not that one, there we go. So I'll just throw this out to everyone:

[01:00:00] I guess, can non-custodial apps connect to a bank and have a user receive funds in their native currency? So that's kind of like a non-custodial-to-custodial payout flow. I believe the answer to this is yes; this is actually a type of payment flow that we facilitate on Stellar. Yeah, I think this is the intention of all the SEPs that have been adopted.
So it's like- normally, what an anchor that complies with the existing SEPs would do is basically allow a non-custodial solution to be able to get, from anchor services like ours, for example, bank information, so that the user can, like, go from their own bank account to, like, the bank account that the anchor has, and then that would, in exchange, you

[01:01:00] know, give the user some equivalent in tokens. So that flow is precisely how, today, all of the active anchors are doing it. Yeah, you can use Lobstr or Vibrant to deposit with a number of different anchors, because they already interoperate. Perfect, great, thanks, guys. And then maybe, Justin, this one's for you: SEP-30 and [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) both look cool- do any of the dev libraries or wallets support these currently? Yeah, that's an interesting question, and part of why I think it was good to start to have this discussion. [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md), which Marco advocated for earlier, but which has come up a couple of times, is called multiplexed accounts. It adds a new kind of Stellar address at the protocol level; it basically allows a single Stellar account to map to multiple users. It was added to Protocol 13. However, it's kind of like we put

[01:02:00] in the wiring, but we didn't put in any outlets or switches, right? Like, there's no manifestation of this in Horizon or in the SDKs, and there's no clear way- no clear, like, methods or standards- for actually using these muxed accounts. So they're there; they're just hidden away. And so with [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) specifically, in order to really be able to use it, we'd first have to build out the sort of platform layers on top of the protocol, and then we'd have to start working with people to figure out how they would actually use those. So [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) is possible, but not really implemented in any existing applications. SEP-30, on the other hand, is not at the protocol level, right? It basically specifies how you can split key recovery across multiple different servers run by different operators. So instead of having a single key, it relies on third parties running these things called

[01:03:00] recovery servers. Right now, Vibrant, which is a wallet that sort of facilitates USD savings at the moment- that uses two recovery servers to implement SEP-30. But if other people in the ecosystem wanted to start implementing SEP-30 in their wallets- this specific key recovery method- we would need to, as an ecosystem, get people to run these things- recovery servers, right? Recovery servers, which are sort of the servers that you delegate to allow a user to access their account if they lose their key. So, short answer: SEP-30 is implemented in one wallet, and could be in more if we set up recovery servers that people could use- and by 'we' I mean the ecosystem as a whole. I'd like to add one point.
So this idea that I mentioned, about having trust lines not for custodians but for key recovery people- the major change that it would add- sorry, my microphone-

[01:04:00] the major change that it would add to the current solution is that, in the current solution, if you add someone as a signer, like in SEP-30, then you are always granting the other rights as well- so they can always, like, jointly transfer funds and stuff like that. Having this kind of recovery service at the protocol level, it would really be just: okay, I'm adding those people, and they can give me a process to recover my keys, but they can never do payments, transactions- even if they are colluding or jointly hacked, those kinds of things- and that would bring a lot of clarity on the legal side, and it would make the entire setup a lot easier. You don't have to store keys online, and things like that. So, just to clarify that. Yeah, I know that we don't have, like, a lot of time left, but I think it's interesting to keep the conversation going, especially because I think the multiplexed accounts can offer- if actually widely adopted- some level of obfuscation to the payments if, like, people start using it, because, as Leigh said, like, people can use them as,

[01:05:00] you know, like, dynamic debit card numbers, and it's interesting: if the entire ecosystem adopts it, then, like, people would be sending money over with different M accounts, and that will, like, obfuscate a lot of the information, which is really interesting. Yeah, well, it sounds like, on both fronts- both on kind of SEP-30's key recovery design and muxed accounts on the [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) side- we have a lot of thinking to do. I think, you know, as we kind of said in the beginning, this is the beginning of these conversations- or maybe the middle, I guess; they've been going on for a while- so we'll definitely be continuing them. So, with that, I would say, you know, I think it's time to wrap up the discussion today, but I think it's likely we may want to all meet back here in a few months, certainly after Meridian, and continue this conversation, hopefully making

[01:06:00] some progress in the time between. So, you know, again- Christian, Meinhard, Marco- we can't thank you enough for your time today and for sharing your perspectives, and I hope everyone has a wonderful Halloween weekend, and we'll talk again soon. Thanks, everybody.
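The co-signing-plus-time-lock workaround Meinhard describes earlier in the transcript (around [30:00]) can be sketched with the JavaScript stellar-sdk. This is a minimal illustration of the pattern under stated assumptions, not SatoshiPay's actual implementation; the helper and variable names are hypothetical.

```js
const StellarSdk = require("stellar-sdk");

const server = new StellarSdk.Server("https://horizon-testnet.stellar.org");

// Rebuilt and re-signed after every co-signed payment so its sequence
// number stays valid; submitted only if the user reports a lost key.
async function buildRecoveryTx(userKeypair, serviceAddress) {
  const account = await server.loadAccount(userKeypair.publicKey());
  const sixMonthsFromNow = Math.floor(Date.now() / 1000) + 183 * 24 * 3600;

  const tx = new StellarSdk.TransactionBuilder(account, {
    fee: StellarSdk.BASE_FEE,
    networkPassphrase: StellarSdk.Networks.TESTNET,
    // Invalid before minTime; maxTime 0 means no upper bound.
    timebounds: { minTime: sixMonthsFromNow, maxTime: 0 },
  })
    // Return the account's remaining lumens to the service.
    .addOperation(
      StellarSdk.Operation.accountMerge({ destination: serviceAddress })
    )
    .build();

  tx.sign(userKeypair);
  return tx.toXDR(); // stored off-chain until (and unless) it is needed
}
```

Because the pre-signed transaction consumes a specific sequence number, it has to be rebuilt whenever the account transacts- which matches the behavior described in the discussion, where the funds return to the service only if no other transactions have been made for half a year.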
diff --git a/meetings/2021-01-14.mdx b/meetings/2021-01-14.mdx new file mode 100644 index 0000000000..504565555f --- /dev/null +++ b/meetings/2021-01-14.mdx @@ -0,0 +1,162 @@ +--- +title: "Regulated Trustline and Clawback Controls" +description: "This protocol discussion focused on proposed changes that make it easier to issue and manage regulated assets on Stellar, including trustline authorization semantics and asset clawback mechanisms." +authors: + - david-mazieres + - eric-saunders + - jonathan-jove + - justin-rice + - karen-chang + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: + - legacy + - CAP-28 + - CAP-29 + - CAP-35 + - SEP-8 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This protocol discussion centered on the evolving needs of issuers building regulated assets on the network. The group reviewed how protocol changes are proposed, debated, and ultimately adopted, emphasizing transparency and public participation through GitHub and the Stellar developer mailing list. + +The technical focus was on two Core Advancement Proposals. CAP-29 addresses long-standing edge cases in trustline authorization by allowing issuers to authorize or revoke trustlines even when certain account flags are unset. CAP-35 introduces asset clawback as a first-class protocol feature, enabling regulated issuers and transfer agents to reverse fraudulent transfers, recover assets after key loss, and satisfy regulatory obligations without disrupting existing assets. + +### Key Topics + +- How CAP-29 simplifies trustline authorization so issuers can migrate between restricted and unrestricted issuance models without stranding accounts. +- Why asset clawback (CAP-35) is required for regulated securities, including fraud recovery and transfer-agent record correction. +- How clawback interacts with existing concepts like authorization, revocation, and claimable balances. +- Design decisions around immutability, transparency, and protecting existing assets from unexpected behavior changes. +- Tradeoffs between protocol simplicity, future extensibility, and issuer ergonomics. + +### Resources + +- [CAP-29: AllowTrust when `auth_required` is unset](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0029.md) +- [CAP-35: Asset clawback](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md) +- [SEP-8: Regulated Assets](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0008.md) +- [Stellar protocol repository](https://github.com/stellar/stellar-protocol) + +
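As a rough illustration of the mechanism CAP-35 proposes, here is a minimal sketch using the JavaScript stellar-sdk as it later shipped with clawback support; the function and variable names are hypothetical, and `amount` is a string as the SDK expects.

```js
const StellarSdk = require("stellar-sdk");

const server = new StellarSdk.Server("https://horizon-testnet.stellar.org");

async function clawBack(issuerKeypair, holderAccountId, assetCode, amount) {
  const issuer = await server.loadAccount(issuerKeypair.publicKey());
  const asset = new StellarSdk.Asset(assetCode, issuerKeypair.publicKey());

  const tx = new StellarSdk.TransactionBuilder(issuer, {
    fee: StellarSdk.BASE_FEE,
    networkPassphrase: StellarSdk.Networks.TESTNET,
  })
    // Pulls `amount` of the asset out of the holder's trustline and burns
    // it; requires the issuer to have had clawback enabled before the
    // holder's trustline was created.
    .addOperation(
      StellarSdk.Operation.clawback({ asset, from: holderAccountId, amount })
    )
    .setTimeout(30)
    .build();

  tx.sign(issuerKeypair);
  return server.submitTransaction(tx);
}
```

The operation only applies to trustlines created after the issuer opted in, which is part of how the proposal protects existing assets from unexpected behavior changes.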
Video Transcript

[00:00] Hi everyone, we are now live. So I just want to say welcome to the first Stellar protocol meeting of 2021. So in a minute we're going to do some quick introductions, but first I just want to take a second to give a quick overview of what we're here for. So the Stellar protocol is designed to evolve to meet the changing needs of the growing ecosystem- all the different projects that are building on Stellar- and actually the proposals that we'll discuss today are kind of a perfect example of how that works, right? They make it easier to issue regulated assets on Stellar. So the purpose of this meeting is to discuss and plan for those changes. In case you're not aware, protocol changes, which are implemented in Stellar Core, are submitted as Core Advancement Proposals, or CAPs for short, and they have a very specific, like, life cycle. They generally start with a discussion on the Stellar dev mailing list, and then they're, like, drafted and further discussed on that list, and kind

[01:00] of, when they've baked a bit, we bring them to this meeting, where we raise issues and address key questions- really start to hammer out the details. The draft of a CAP goes through several stages before it's accepted and ultimately implemented into a new version of the protocol, and validators have to actually vote to accept that new version of the protocol in order to upgrade the network. So there are a lot of steps. I want to also say that every step in that process is public. So if you're interested in keeping up with, or even participating in, discussions about the future of the Stellar protocol, I definitely urge you to check out the Stellar protocol repo on GitHub, and also to join the Stellar dev mailing list- that's where discussions about these things originate. There are links in the meeting description, so anyone listening, you can join right in. In case it wasn't clear from that overview, what follows is going to be a pretty specific and pretty technical discussion. We're mostly going to focus on [CAP-29](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0029.md), which would make some modifications to the allow trust operation in order to

[02:00] better support regulated assets. And then we're going to talk about [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md), which is probably what the real bulk of this meeting will be about: [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md), asset clawback, which provides an issuer with a means to claw back assets from a holder in order to support regulatory requirements. There are links to the proposals and to the discussion threads of those proposals in the show description, and if you really want to follow along, you should probably check those out- again, there's a lot there, but it will really help you if you're sort of looking at the nitty-gritty details while we have this discussion. So that's kind of my overview. But before we actually start, let's do some quick introductions.
So when I call on you, just say your name, your title, and your affiliation. I will start: I am Justin Rice, head of ecosystem at the Stellar Development Foundation. Tomer: Tomer Weller, head of integration, Stellar Development Foundation. Dan: I'm Dan Doney, the CEO of Securrency;

[03:00] we're a blockchain-based financial services infrastructure company. Awesome. Leigh: hi, I'm Leigh, principal software engineer at the Stellar Development Foundation. Jonathan: hi, I'm Jonathan Jove, I'm a senior software engineer at the Stellar Development Foundation. Nicolas: yeah, Nicolas Barry, at the Stellar Development Foundation. Great. Eric: my name is Eric Saunders, I'm a Horizon engineering manager at the Stellar Development Foundation. Siddharth: hi, I'm Siddharth Suresh, I'm a software engineer at the Stellar Development Foundation. Great. Karen? Oh, we didn't hear Karen- but can you try it one more time? I think there's something up with your audio, and

[04:00] we'll figure it out. And finally, I believe, David: hi, I'm David Mazières, I'm chief scientist at the Stellar Development Foundation. Okay, great. So let's get started. In sort of a last-minute decision, we flipped the agenda and decided to talk about [CAP-29](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0029.md) first- it's a smaller discussion, and there's less to work out, in a way, and it also sort of logically makes sense, because [CAP-29](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0029.md) has to do with the existing authorization flags on accounts. So [CAP-29](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0029.md) has the catchy title 'allow trust when not auth required'. Basically, trustline authorization is an important feature of the Stellar protocol; it allows issuers to handle various regulatory requirements. Currently there are two flags that you can set on an account: auth required and auth revocable. However, the way the allow trust operation works with those two flags probably needs a bit of tweaking. So the

[05:00] goal is to kind of clean it up. Basically, the idea is that issuers should be able to authorize a trust line even if their account doesn't currently have the auth required flag set, and they should also be able to revoke trust if they have the auth revocable flag set on their account, even if they don't have the auth required flag set- right now you have to have them both set. That would basically allow them to deny-list certain accounts, which, again, is valuable for issuing regulated assets. So this CAP introduces two changes to the allow trust operation in order to achieve those goals. So that's the summary. I think, you know, anyone can kick off the discussion- I think this CAP originated with Tomer, so, Tomer, do you have any thoughts? Yeah, just quickly discussing the use cases that we have issues with right now. One is: what happens when an issuer changes authorization flags? So let's say that I issued

[06:00] the Tomer asset with the auth required flag to begin with; Justin created a trust line; that trust line is by default unauthorized until I allow trust on it. But let's say that later on I change my authorization flags- I remove the auth required flag- and now I want to elevate Justin's trust line to be authorized. I can't do that, because the auth required flag is not set. So it creates, like, a bit of a weirdness: I need to reset it in order to set Justin's proper authorization flags. So this is one case that we want to fix. The other case is: what
The other case is: what if an issuer wants to have a deny-list asset? That means that by default the authorization state is true, authorized, but if the issuer wants to disallow that, they can de-authorize the state. Right now they

[07:00] can only invoke the allow trust operation with authorize set to false if the auth required flag is there. So this is a pretty unoffensive CAP; it just fixes the semantics to make sense. Yeah, that's it. First, kind of obvious question: why did the issuer unset their flag? Why not just leave it? Are you talking about the case when an issuer modifies their authorization flags? I mean, this stuff doesn't work if the issuer turned their flags off, but I guess I'm wondering why they did that. Maybe the authorization requirements changed. Maybe when they did the first distribution of tokens it needed to be regulated, but for some reason down the road it doesn't have to be. So the idea is that, right now,

[08:00] as long as issuers don't set the authorization flags to immutable, we want them to have the freedom to move between these things, and this currently gets in the way. I feel like there also have been cases where someone started with an auth required asset and realized that it wasn't necessary, and so they essentially made an error when they initially set the flag on the account. I mean, I'm all in favor of consistency, and this just seems to make our transaction semantics more consistent. So why have weird little rules that disallow certain things in certain corner cases, in certain situations, if we don't need to? It just seems like an obviously good idea. Yeah, I'm with David on that, and following up on that: we're making the rules

[09:00] simpler in a way that, for anybody who is relying on these rules, there's no sensible way in which you could have been like, oh, my business is based on these particular rules being in effect, my application is designed to take these things into account. There's no compatibility risk to changing this stuff; I don't see how you could have relied on it. Is this the shortest CAP discussion we've ever had? I guess my question is: why is it like this? I mean, presumably we had an idea. Excellent question, Eric. I wasn't here for the origins of this, but from studying the code I have some good hypotheses, and I think there were some other things in the code that we've already changed; Siddharth did the work on it, I think it was [CAP-28](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0028.md) maybe. But basically this name, auth required, is very misleading. From my perspective the name should be something like

[10:00] "auth default" or something like that, as in: do you default to true or default to false? That's what the flag actually does inside of Stellar Core. But because of this name, auth required, there were lots of places in the code where we were doing things that looked like we were going to check this flag on the issuer's account, but we don't actually do that. And so I think this behavior originates from this very misleading name more than anything else. I don't know; people like Nicolas might have a perspective on why it was written like this originally, if you can remember back that far. A lot of things happened very quickly last year. Yeah, no, I don't remember.
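For readers following along, here is a minimal sketch of the case CAP-29 addresses, using the JavaScript Stellar SDK; the asset code and account IDs are invented placeholders (real code needs valid keys), and this shows only the operation builders, not a full transaction:

```js
const StellarSdk = require("stellar-sdk");

// Placeholder holder account ID (invented for illustration).
const holderId = "GHOLDER...";

// The issuer sets AUTH_REVOCABLE on its own account, but not AUTH_REQUIRED.
const makeRevocable = StellarSdk.Operation.setOptions({
  setFlags: StellarSdk.AuthRevocableFlag,
});

// Under the pre-CAP-29 rules, revoking authorization on an existing
// trustline fails unless AUTH_REQUIRED is also set on the issuer account;
// CAP-29 would let this succeed with AUTH_REVOCABLE alone.
const revokeTrust = StellarSdk.Operation.allowTrust({
  trustor: holderId,
  assetCode: "REG",
  authorize: false,
});
```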
So it seems like no one has an objection to this CAP. I mean, is that correct? Does anyone have an objection? I'd say, if so, now is the time.

[11:00] What I object to is that, psychologically, I think we all feel like there needs to be some minimum discussion time for any proposal, but if it's just an obviously good idea, let's just get it over with and be done. Great. Then he stuck to his word, and this was 15 minutes or less, so that's impressive. Yeah, it's eerie. Okay, great. Are there any follow-up questions that we need to address before we move on, just in terms of thinking about the implementation of this CAP, or do we just move on to [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md)? So, one last point, following on what Jon said: do we think we should take the opportunity to rename these flags? I discussed this with Leigh a bit, and I pitched that same thing. Relevantly, this came up in the discussion of [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md) as well, and Leigh kind of convinced me that from an ecosystem perspective renaming this would be nightmarish. I'll let Leigh chime in on that if he wants to,

[12:00] but I was pretty convinced that it's not a good idea. Yeah. So these names obviously exist in the protocol, and they also exist in Horizon's API, and that consistency is elegant: you can look at the Horizon API and see a flag name, and you can read documentation that discusses the protocol, and it's the same. So if we were to change it in one place, it would be ideal to change it in other places to maintain that consistency, but that would break Horizon's API, because the actual names, auth_required and auth_revocable, show up in the API as field names. I think the cost outweighs the benefit of changing the name, personally.

[13:00] Just take the victory. Okay, we're moving on. Thank you; that's the right way to do it, David. Okay, then we're going to move on to [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md). As I mentioned at the top of this meeting, [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md) defines a method for allowing an issuer to claw back an asset in order to support regulatory requirements. It could be used, for instance, to recover assets that have been fraudulently obtained, to respond to regulatory actions, and to enable someone who can prove their identity to recover an asset in the event of key loss or theft.
So, this proposal, which was updated very recently: it currently introduces a new account flag, a new claimable balance flag, and two new operations, one to claw back an asset and one to claw back a claimable balance. Dan, I think you understand the need for this change more than anyone. Is there anything you'd like to add? Just a little background on this. We are doing a series of

[14:00] experiments with a number of prominent regulators from around the world, and we did a pretty significant experiment demonstrating the ability to enforce regulatory rule sets from different jurisdictions using Stellar's DEX, and it was very well received by regulators in that particular setting, that is, the ability to enforce policy in a decentralized setting using the SEP-8 pattern. And out of that discussion came a second discussion that the regulators are particularly interested in, which has to do with custody. You see assets in general falling into one of two patterns. One is a bearer instrument pattern. Bearer instrument patterns have certain benefits in the marketplace, in terms of being censorship free, but they also have certain downsides to their nature, and this is one that makes them much more difficult to regulate.

[15:00] From a regulator's perspective there's a particular record keeper role, known as a transfer agent, which has certain responsibilities to the marketplace. The transfer agent, if they fail to execute their responsibilities, can be sued, but they keep the official record. In the absence of the transfer agent being able to correct the record, you can't have records on blockchain for securities. But it turns out that with a clawback mechanism you can actually empower a transfer agent, that is, a licensed entity with this responsibility, to correct the record. And why would it do that? The big thing that, for example, the SEC worries about is the case of the Canadian exchange operator who disappears or dies and loses custody of a number of their clients' assets, and there's nothing that the marketplace can do about that scenario. The SEC sees, for trillions of dollars worth of securities residing in these

[16:00] ledgers, that it would mean they have to be securities experts for everyone who performs a broker role in the marketplace, and that to them is just frightening; no one would ever get through, it would be just way too much of a responsibility. Not trying to discount the SEC's position on these things, but they see the ability to correct the record as an essential component, and the central piece of correcting the record is executing a clawback. If someone has made a mistaken or illicit transaction, and you can pull back that asset and return it to its rightful owner, you have the means to correct the record. So this is the nature of this element. As we've interacted with regulators globally on regulated assets, virtually all of them require some form of this, which we believe will easily fall out of the structure of what's been produced here with [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md). Hopefully that provides some background, and I'd be happy to answer questions if there's anything else on this. We happen to be

[17:00] going in and showing the regulators this in the near future, and that's why there's some pressure to show that Stellar can be used in a way that satisfies the world's regulators, very prominent regulators, that Stellar is suitable for securities.
I just want to clarify: when you say "we," you mean Securrency? Correct. Great, cool. I mean, there's already been a lot of back and forth on the mailing list about this proposal, and it seems like there are specific issues to discuss. I think the first one that came up is about the immutability of clawback capability and how it relates to trust lines and claimable balances. Leigh, could you tell us a little bit more about what that means, what the issue is? Yeah. So, it's important to be transparent with an investor, that is, a token holder, as to what rights they have and what rights can be removed. So you want it to be absolutely clear, if they're holding one of these

[18:00] regulated assets versus a bearer instrument, that this is an asset that can be clawed back, because ultimately that's an investor's choice. If you don't want to hold assets that have this kind of regulatory oversight, that's your right. So you need to, as you look at the asset, have the indicators that are going to tell you that it either is or isn't in this category. And what follows from that is, even if it is not in this category, it can't be placed in this category against the holder's will. So once you have designated an asset as regulated, or if you have designated it as a bearer instrument, there's no way that someone can backdoor you on this, and that was an important part of the shape of the discussion around the CAP. Yeah. So is this immutable, or, you're saying this anyway. So there are a couple of ways that we address what Dan is talking about.

[19:00] The first is: auth immutable exists, and it still applies to this new flag on the issuer account. So if somebody is using that, they can't enable clawback enabled, in the same way that they can't enable auth required or auth revocable if they're not set. The second way is that the proposal, [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md), snapshots the state of the issuer account's clawback enabled flag onto the trustline when the trust line is created, and a claimable balance snapshots the state of the trustline's clawback enabled flag onto itself as well, so it sort of inherits that state. So when an account trusts an issuer today, if the issuer enables clawback tomorrow, it doesn't apply to accounts that have already trusted that issuer; they get the state from when they trusted. And claimable balances are the same. So a user, or an account

[20:00] holder, can have some confidence that if they create a claimable balance tomorrow, it will still not be clawback enabled. And yeah, I think this is something that we want to discuss: is this appropriate, should it be possible to change it? That's actually one, yeah, sorry, go ahead. Yeah, the thing I was going to raise, and I think the current version of the CAP actually reflects this: one of the things I really wanted to get into this CAP as a property is that, if we do have assets on the network today that do not expect this kind of capability to exist, they should basically not care, and I think right now we are actually achieving that.

[21:00] Yeah. So what this actually would mean for all accounts, or for all trust lines, that already exist on the network: all trust lines would start, with this CAP, clawback disabled. So this would have no impact on existing trust lines or accounts holding those trust lines, and therefore no impact on any claimable balances they created from existing trust lines either.
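As a rough illustration of the snapshot rule Leigh describes, here is a toy model in JavaScript; the names are invented for this sketch and are not actual Core or SDK types:

```js
// Toy model of CAP-35's proposed snapshot semantics (illustrative only).

// When a trust line is created, it copies the issuer account's current
// clawback setting; later changes on the issuer don't affect it.
function createTrustLine(issuerAccount) {
  return { clawbackEnabled: issuerAccount.clawbackEnabled };
}

// A claimable balance likewise copies the flag from the trust line that
// funds it, so existing holders keep the terms they originally accepted.
function createClaimableBalance(fundingTrustLine) {
  return { clawbackEnabled: fundingTrustLine.clawbackEnabled };
}

// Example: a holder who trusted before clawback was enabled stays exempt,
// even if the issuer flips the flag afterwards.
const trustLine = createTrustLine({ clawbackEnabled: false });
const balance = createClaimableBalance(trustLine); // clawbackEnabled: false
```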
But one problem I have with the specific implementation is: what if you do want to set or clear the flag on a trust line? I could think of several scenarios where the current proposal would not work. For example, let's say the issuer revokes authorization from a trust line and offers to restore it in exchange for adding the clawback flag. This would be a very realistic thing to do in response to

[22:00] some kind of regulatory change, where you're like, okay, I'm freezing your assets because you ignored my requests; you either have to sell these assets back to me right now or comply with the new regulation. Another example would be that you have different clawback policies for institutional versus individual accounts, and now there's no way, or it's very hard, to create these institutional accounts: you'd have to create these sandwich transactions, and those sandwich transactions would not work if you added auth immutable, right? I mean, another thing you might want to do is have clawback depend on the limit of the trust line. Although I guess people can actually raise their limits after a trust line has been authorized, it might be nice to say: you know what, I don't care if people store a hundred dollars, but if anybody's going to store millions of dollars, I need to be able to claw that back, because maybe they stole it or something. Yeah. David, this is a piece where, if you take the two capabilities in

[23:00] combination, I think you get the outcomes that you want without a change. Except that there's no way, or tell me how, to add the clawback enabled flag or remove it from a trustline that already exists. That seems to be missing: either a description of it is missing from the CAP, or the mechanism. It's a fair point, the desire to potentially freeze an account using a mechanism and then basically force the party to either fall into the new regime or keep their account frozen. Let me separate that thought for a second and address the second point that you raised, regarding, hey, there may be some difference between a wallet or an account that's an institutional party versus a retail party, or a range of different options. We handle that through the policy component of a regulated asset. So we're able to tag addresses or wallets with specific

[24:00] characteristics, and one might be a characteristic or an attribute of being an institutional party, and that then affects the rules on how that wallet transacts with this specific asset. And so with this we're able to set limits, like, if it's not more than a thousand, then the following things can happen, or if this party has certain rights, they can transact with parties who have other rights, and we do that at the movement of value using the SEP-8 strategy, and there the policy is going to know. But hold on, the problem is this is not a SEP problem, it's a CAP problem, right? So imagine a situation where some central bank issues a digital currency, and somebody else says: I'm going to buy a billion dollars worth of your asset, but it can't be clawed back, right?
So everybody else has clawback. You want that billion dollar investment, right? So you're willing to do that for me, and we could collaborate together and sign a transaction, but the current

[25:00] mechanism doesn't provide us any way of doing that. If you want everybody else to have the clawback enabled flag on their trust lines, there's no way to create a particular special account that doesn't have this flag, particularly if you have auth immutable. Here's how we handle that scenario, or at least, if this were the challenge that was presented to me, I would handle it this way: I'd actually issue two different share classes, one with auth revocable and one that could not be clawed back. Okay, so now I have two, and so those exist. That's not the same; that's not what I'm asking for. I want to be able to trade. I'm going to buy a million dollars worth of your asset, and I want to be able to trade that on the open market. You want to be able to claw it back from people who steal the assets, but part of our special, bespoke deal is that you're not going to be able to claw this back from me, because I'm laying out a lot of money, right? So this is a very realistic thing to do. If we could set the bits to

[26:00] the right values, we would be able to do that. There's no reason why, if both the issuer and the owner of the trustline consent, they shouldn't be able to configure this. But the mechanism proposed here does not allow them to do that, and I don't see a justification for not having a mechanism to do that when everybody's on board with the fact that it should happen. Okay, David, so let me offer you two options here. Well, let me offer you one option that addresses these two things. I think that going in the downgrade direction, taking an asset that's clawback-able and making it non-clawback-able, that's completely unoffensive. I've actually already spoken with Siddharth about how we would implement that; we're pretty clear on what we would need to do, and it's not hard. But the other direction is not cool: you can't take something that's not clawback-able and make it clawback-able. Well, why shouldn't the owner of the trust line be able to do it? I think the issuer of the asset should be able to remove the clawback flag, and the owner of a trustline should be

[27:00] allowed to add the clawback flag. But if the owner is going to cooperate, they can already do this. How do you do it? Wow. So all you have to do is make a cosigned transaction between the issuer and the holder: the holder sends the money back to the issuer, deletes their trust line, and the issuer sends the money right back, and now... No, okay, so hold on. First of all, I have a bunch of comments on this. One comment is that I think the CAP should make some suggestions for the ecosystem: Horizon should keep track of how many times clawback has been used on any given asset, and how much has been clawed back, because this is going to be information people are going to want. And it is one thing to allow someone to claw something back; it's another thing to actually send them the payment, because when this comes to court, this is going to be a mess, right?

[28:00] They're going to be like: you sent the money back to us
because of this other agreement or whatever. It very much weakens the negotiating hand of the person who owns the trust line if they have to literally send all the money back and delete their trust line. If you just think about implementing this, it's much more of a mess, right? No, I think it's a mess if you actually allow people to go from something that cannot be clawed back to something that can be clawed back, because think about it: it basically breaks any kind of assumption you can imagine in terms of pre-signed transactions and all that stuff, because at this point the issuer gets full control at any kind of intermediate step. Wait a second: if I can send the coins back to the

[29:00] issuer, why shouldn't I be allowed to add the clawback enabled flag? That's clearly less of a thing to do, right? I mean, okay, I don't have any material opposition to doing it. It's just that, since I can give you a way to do it without changing the protocol, why should I change the protocol? But it's not the same thing, right? It's not atomic. No, that is atomic, that's a single transaction. Sorry, yeah, but it requires me to cancel all my orders and stuff. I mean, it's just a total mess, right? Look: in one month, because of this regulatory change, we're going to require all our customers to either add the clawback flag, or we're going to freeze their trust lines by revoking authorization. And so now the customers have a month, and at their leisure they should be able to do this. They shouldn't have to coordinate with us, send the money back to us and trust us to send it back to them, or do some complicated protocol where we

[30:00] pre-sign some transaction. This sounds like a far-fetched scenario, to basically say, hey, I have this asset, and I'm going to make it clawback-able now, or whatever. Yeah, because I think the more realistic way to do this would actually be to issue a new asset and then ask people to just move over. It's way cleaner; you don't have this mixed mode. I mean, what I'm asking for is not complicated, and it's kind of more consistent, right? We already have operations to set flags on trust lines, right? But not on your own trustline; that would be a new thing. I don't think you can control your own flags today. Well, as an issuer you can. Hold on, let me look at change trust. As an issuer

[31:00] you can, but those are authorization flags, right? They're different. I guess for me, David, the main thing that I want to bring up here is that we don't have to do this now. We could do it now, there's nothing stopping us, and I agree it's a cleaner way to implement this if it's something that people really actually want, but I'm, hey, not convinced it's something that people would actually do, based on what Dan was saying. And if it is something that people really do want to do, then, as we just agreed, it's easy. It's a change in semantics; we don't allow anything like this today, but there's nothing stopping us from allowing it in the future. It'd be an easy thing to do at any time if the need for it actually comes up; we could just do it. But I don't think we need to do it now, basically. Okay.
But we do need the ability to remove the flag, yeah, right. I see a lot of compelling reasons to have the remove part. The remove part, that seems right.

[32:00] Another thing to consider here, regarding Jon's previous suggestion of co-signing a transaction with the issuer: these issuers are interactive right now, okay? They're probably doing [CAP-18](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md)-style sandwich transactions; they're co-signing every transaction. So interactivity with the issuer is not a big hurdle here; it's already there. I mean, it's just, like, massively sending someone all your money. Who knows, maybe there are tax implications for this, because you've actually given someone the money? I don't know. It's just a bit... There's clearly no security issue. It is a bit that someone who owns a trust line should be able to consent to, so why not provide them a mechanism for that?

[33:00] And just to be clear, you think that there might be a way to basically load that mechanism into change trust? Into change trust, no, I guess we don't... it wouldn't work in change trust, so it needs to be a new one. It would have to be in allow trust with a different signer, but that's a mess. So yeah, I guess it would need another operation. So, I'm sorry, we frequently issue assets that represent different rights, so this would be tranches or share classes. So for example you have an instrument denoted XYZ, and... That is absolutely not what I'm talking about here, right? I'm talking about a situation where you need to have the same asset as everybody else, because you need to be participating in the DEX with this asset, so you need to have fungibility. I'm only concerned about the case where you need fungibility but you have different classes of asset holder, not different classes

[34:00] of asset, right? Yeah. I actually think those represent different rights in a way that to me is maybe subtle but meaningful, and of course on the DEX you can always trade one set of rights for another. So you can trade, if both assets exist, one with clawback and one without clawback, there's a... Yeah, again, I think we're just going down a rat hole; it's not productive. I do not care about that case. I understand that it's a valid case, but that's already supported, so we don't need to discuss it, right? That's fair. You're talking about something different from what I'm talking about. I'm saying, I have this bespoke deal where I'm going to give someone a lot of something that a lot of individuals have a little bit of, and because of that I'm willing to give them more rights. So it seems like there's agreement that we can at least support that scenario, and we'll have to wait for the other one. So, I have a couple of other

[35:00] critiques. First of all, I think we have this mentality that, since we've made this mistake of putting the extension switches at the end of structures so many times, well, why not do it one more time? But it's still a mistake. So I really think that we should put the union at the top of the structures and not at the end. Just because we made the mistake 10 times, that doesn't mean we should make it an 11th time. There's just no justification for tying our hands in terms of future extensibility by not allowing us to union the whole data structure.
So, you know, sorry to sound like a broken record. Which structure are you talking about here? Sorry, well, any of them, anytime there's... let's see, let me actually get the thing in front of me. If I pull up, just search for "ext"... Because these are operations, right? You're talking operations? Then... Because for operations we don't need that. We already have the claimable balance entry extension v1,

[36:00] and the switch is at the end. Oh, and you're saying to move that? Right. Yeah, okay, that's fine, I don't care. Okay, great. What's the benefit? It just gives you many more options for future upgradeability: you could decide to version the whole data structure instead of just appending more and more nested things. And so it potentially makes source code compatibility a lot easier, because we didn't have this on transaction, and we had to jump through some pretty gross hoops when we wanted to extend the transaction format. It sounds like there's no objection to that very specific idea, is that true? Fantastic, because I feel very strongly about it, so if nobody else cares, please humor me and do that. I do want to make sure, also: I know that there's some other stuff about how [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md) deals with claimable

[37:00] balances, and I believe there's some discussion that needs to happen there. Is that correct, Jon? Oh, you mean... sorry, what was the topic? I was looking at the proposal while you were talking to me. Oh, sorry: clawbacks and claimable balances. I know there is some discussion around how they interact, is that correct? Yeah, there's a little bit of discussion, and there are also some interesting topics about what flags we should be using or reusing. Depending on how many more comments David has, I think we can go... I have two more, and I'll try to make them quick. Well, one is quick, and the other, maybe we can defer. So the quick one is: please give the data structure for an asset code a name, because it appears also in the allow trust op, and this is particularly the kind of data structure that you want to special-case when you're rendering it in text and stuff. And so have it be a single data structure called, like, asset code or asset code name or something. You mean

[38:00] the non-native assets, right? Basically, yeah, exactly. Yeah, I think Leigh is already making the change. Perfect. And then the other thing is just kind of to plant a seed, that there's an issue that comes up in a lot of other contexts: people have complained to me that they want to create a fixed number of an asset and then continue to manage it with auth required, and it's a bummer, because what happens is you basically create this account, you throw away the key with the high signing weight, and you keep only a low signing weight so you can authorize trust lines. But then you have no way to rotate the keys. And this situation is going to get worse, in that you'll also have no way to fiddle with the stuff I was talking about before, where you change auth immutable. We're not going to fix that in this CAP, but it's something we should keep in mind when we do these proposals, that someday we should solve that problem. Okay, I'll shut up and let

[39:00] other people... You also now have no way of fixing the supply.
So yeah, that's exactly what I was thinking, Tomer. We've got a lot of people... That's right, you also can't fix the supply, exactly. So it'd be cool if you could delegate to some other account, so it can't create the asset, but it can revoke it and it can authorize trust lines. We'll add a level of indirection. That's not for this CAP; that's just an idea to put in the toolset. Okay, we can move on. Jon, I think you had some questions about claimable balances, is that correct? No, I think we should move on and let Leigh talk a little bit about the different types of flag options that we have available to us. I think that, based on some discussions that we've had, that's kind of one of the key things; if you look through that thread on the stellar-dev mailing list, we have cycled through that topic like four separate times, and it's pretty key to actually getting an implementation ready. So I think we should go there first. Yeah. So what we're talking about

[40:00] specifically is: the proposal right now adds a new flag to the account flags, auth clawback enabled, and what we want to talk about is, does that make sense, does that deliver a lot of value, or should we reuse the auth revocable flag? Because clawback and revocation are conceptually very similar: one freezes the asset, so essentially you can't use it; the other one takes it away, so you can't use it. In both cases the account is losing control over the asset, and in a lot of cases we think if you have clawback enabled you're probably also going to have revocable enabled, or vice versa. So, sort of inspired by [CAP-29](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0029.md), in an attempt to clean up flags, to make flags simple and make it harder to shoot yourself in the foot with them, does it make sense to simplify this and attach it to the existing auth revocable flag?

[41:00] What do people think about that? I'll say, from our perspective, we can't imagine a scenario where those rights would be distinct, so we can easily see those collapsing into the same right. From a regulatory perspective, that authority is held by the same party. I think it doesn't hurt to keep them separate, and at the very least there's a sort of benefit for appearances. It goes back a while, but things like this have definitely been really touchy subjects in the past, where people have gotten really upset at the idea that you could revoke things, and I think, even if, I sort of think that

[42:00] there may be a transition argument anyway, where you initially issue an asset and then later decide that you actually want it to be clawback-able, and so I actually think there's value in that. But I also think that we don't want it to feel in any way that Stellar is trying to force people to consent to having their assets revoked. And so taking a piece of functionality that people might already be using and saying, okay, now you can only use this functionality if, in addition, you're willing to give up these extra rights: I don't feel good about that, and I think there might be people, even if it's a kind of a subtle change, and maybe even
if you could argue that, well, you could sort of do other things that would be bad to someone that they've already consented to, I don't like the idea of it. It feels too much like a power grab to me. That's fair. Look, we don't have a dog in the fight in the politics of this,

[43:00] of course, only just for the sake of simplicity from our perspective, and just as one of your many users: the only distinction between the right to freeze and the right to claw back is just the mess that you leave behind when you freeze and you don't claw back. So from our perspective, again from a regulatory perspective, if you can do one, you really can effectively do the other. The only difference is... But it also sort of depends what real-world interpretation people give to these values on the ledger. So maybe you have a company that's only authorized to issue some number of shares, and so you could freeze shares that people have, but that doesn't allow you to issue more shares without amending your bylaws or whatever, whereas you could argue, well, if you claw them back, then you can just give them to someone else. So even though, yes, we could just by convention agree that these things are equivalent, I think what it looks

[44:00] like on the blockchain, and how easy it is to map that to how you actually interpret it in the real world... I think it's just cleaner and easier if you have two separate options, and again, it doesn't cost us much to have these two separate flags; it just seems less objectionable. Is there a downside to having two separate flags, and if so, what is it? Well, one of the things that Leigh and I kept talking about was this weird scenario. Imagine that you are some kind of... I don't know what to call you, because, as Dan pointed out, it's probably not reasonable to do this, but you're some kind of entity that wants to be able to claw back but not revoke. And suppose you were like, okay, well, I look at the flags, this is pretty obvious: all I have to do is set the clawback flag and not set the revoke flag, problem solved, right? Not so, because if you do this, you are almost surely in a situation where you're about to shoot yourself in the foot. Because if your asset

[45:00] holders then create a bunch of offers, you will not be able to claw back the assets that are tied up in the liabilities of those offers. The reason for that is that the only way to cancel the offers for an account is to use revoke. So this is kind of one of those things where it seems really cut and dried, and we could paste the biggest, reddest sign that you can think of in our docs, but I guarantee you somebody will shoot themselves in the foot anyway. Yeah, you're right: there are four possibilities, and three of them are plausibly useful, and one of them probably means you're doing something you don't intend to do. But that isn't an argument to have two flags or one flag, though, because you can just disallow clawback unless revocable is also set, right? I mean, I get they're tied together and it seems useless, but you could have that. Yeah, I mean, it increases the implementation burden and the mental modeling burden for users.
But I agree there's

[46:00] nothing stopping us from doing that. I mean, it seems to be a trade-off between the mental model and overhead of the issuer versus the mental model and overhead of people that want to understand whether things can be clawed back. The thing, though, about separating clawback from authorization: I think the one area where this makes a big difference is in the balance entry, actually. Because, as you think about the way those things work, authorization is actually completely async in claimable balances: you have those two-part payments, right? When you create the balance entry you need to be authorized, and then later, to claim it, you have to be authorized, but what happens in between is kind of this funny thing. And having this as a separate flag allows you to deal with situations like

[47:00] what we were talking about earlier, when, for example, you want to handle the different downgraded accounts or whatever that don't have that, that cannot be clawed back. I mean, basically you still need to propagate this flag somehow, and if you're going to propagate it in balance entries, you could say, well, I'm propagating authorization. That's kind of funny, because you're always authorized, so what does that really mean? Yeah, one clarification about the proposal: we wouldn't be propagating the auth flag. Everywhere in the CAP where the auth clawback enabled flag, or whatever the name of that flag is, appears, you just sub in the words auth revocable. So basically the clawback enabled flag on a trust line would be set based on the auth revocable flag of the issuer at the time the trust line is created, and similarly the claimable balance entry then inherits the flag from the trustline. So it's not so bad

[48:00] in this case; it doesn't really contradict that. But that's strictly less powerful than where we are right now, where you can actually remove that flag on one account, and then you have this account that basically can do all sorts of things, including balance entries that cannot be clawed back, because maybe they are part of nice smart contracts or whatever, where it's very clear that that thing cannot be messed with by the issuer. So you're talking about the value of an account where revoking is still sensible but clawing back is bad, that's what you're saying? Yeah. I can see that argument. Yeah, I think the big issue... oh, sorry, I think my audio just cut out. No, please go ahead. I don't know, was I interrupting you, Jon? No, you weren't, you're good. Yes, I think the big negative, so really the only big negative that

[49:00] I've heard people say about having two flags, is just that you can accidentally misunderstand how to use them, and you can set one and then you can't actually claw back, because somebody has offers. So is that an issue that we need to revisit? Like, the fact that you can have clawback enabled but you can't really claw back because somebody has offers? I know we defined it to work that way because, from an implementation point of view, it's simpler for Core and the protocol, but do we need to revisit that? Could we have two flags
but just not allow one of the four combinations to be set on an account? I would prefer that over the thing that Leigh was saying. From an implementation perspective, that's way easier than changing how the offer destruction gets done, which is not good, and I don't want to do that. Would you be okay with that, Leigh? I mean, I'm okay with basically saying, hey, we're not going to let you do the combination that's almost surely a

[50:00] mistake. It doesn't bother me. I think it would be possible for us to say, with [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md), that they both have to be set, but if in the future we discover that actually there is a really important use case where we don't want them both to be set, we could have a proposal that then removes that constraint. Or, if we sign up for this constraint, is it something that we need to support forever? No, because we can just later allow you to set the account flags that way, right? So we're just saying that... Then I'm fine. I think that's... So you can set the revocable flag, or you can set both flags; you can't set the clawback flag but clear the revocable flag. This also means that, for a trust line that has revocable set and clawback set at the time it was created, it won't

[51:00] be possible to use allow trust to clear the revocable flag. So, fun. Wait, we don't have a revocable flag on the trust line. Oops, yeah. So it seems like the solution is: have two flags, only allow three combos, and disallow the fourth combo, the one that shoots you in the foot. That's brilliant.
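A one-function sketch captures the rule the group lands on here (illustrative only, not actual Core validation code):

```js
// Of the four combinations of AUTH_REVOCABLE and AUTH_CLAWBACK_ENABLED,
// only clawback-without-revocable is disallowed, since revocation is the
// only way to cancel open offers before clawing back their liabilities.
function accountFlagComboAllowed({ authRevocable, authClawbackEnabled }) {
  return !(authClawbackEnabled && !authRevocable);
}

// The three permitted combinations, plus the rejected foot-gun:
accountFlagComboAllowed({ authRevocable: false, authClawbackEnabled: false }); // true
accountFlagComboAllowed({ authRevocable: true, authClawbackEnabled: false }); // true
accountFlagComboAllowed({ authRevocable: true, authClawbackEnabled: true }); // true
accountFlagComboAllowed({ authRevocable: false, authClawbackEnabled: true }); // false
```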
So, we've got five minutes left. Is there any other issue that anyone wants to bring up? If not, Leigh is offering to give a quick summary, which I think would be helpful. Great, Leigh, take it away.

[52:00] Okay, so I think we have five takeaways relating to [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md). There was some discussion around XDR and moving extensions to the top of any new structures that we're creating, which is an easy one. We need some definition for a non-native asset code, which is an easy one. We agreed not to reuse auth revocable, to keep the separate flag, and to require that both auth revocable and auth clawback enabled be set at the same time if clawback is enabled. And then, finally, there was some discussion earlier, at the beginning, about a need for the issuer to be able to remove clawback enabled for a trustline. And there's going to be a common data structure between the allow trust op and the clawback op, for, like, asset code or asset code name or something.

[53:00] Oh yes, yep, that was... yeah, sorry, the non-native asset code: we will introduce some non-native asset code structure that's common between them. I think, out of all of it, most of these things that I just summarized are very straightforward. The one that maybe is not super straightforward is the ability for the issuer to remove the clawback flag, because there are some issues with adding that to the existing allow trust op, which we might need to think about. Siddharth was actually the one who originally brought that to my attention, and I think to your attention as well, Leigh. So maybe Siddharth can talk about that for a second, about what we had presented as an option for how to do that. Yeah. So the issue with allow trust right now is that it requires you to know which flags are already set, because it currently just sets the trustline flags to the parameter

[54:00] that's passed into allow trust. And this was fine when there was only one value, but as you add more flags, it becomes a burden on the user. Ideally it would work the way set options works, where you specify which flags or which bits you want to set and which bits you want to clear. So what we would like to do in the future is create another operation that works the way set options works, where you can enable clawback if you want, or you can disable clawback without knowing what the other flags are. I mean, you could just turn authorized into a union, right? There are only two values currently used. That's not true; there are already three possible values for authorize, and with this there would now be six.

[55:00] Well, yeah, I think six... That all makes sense. We're not saying you put in the six; instead you have a union where the three existing values are just void cases, and there's another case which says "set bits" and then has a 32-bit integer as the field. I mean, whatever, this is bikeshedding; whoever implements this should make the decision. I'm just pointing out that there are lighter-weight options than creating a new operation, although maybe they're gross enough that you want to create a new operation anyway. That's definitely an interesting proposal, though; I hadn't even considered that. So, I think my question here: it sounds like there are multiple ways we could go about this, and we can explore that. My question is, does this need to be a part of [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md), the ability to... It should be, if we're going to do it,

[56:00] it should be, yeah. It would be better for it to be. If it's going to completely derail [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md), because we argue over the details of how to do something that there are two ways to do, then I guess maybe not. Well, I feel like that's something for the next CAP, yeah. I think we're out of time for this meeting. I feel like we did make a lot of progress on [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md), and on [CAP-29](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0029.md) too, and getting into those super fun details can happen next time. I just want to thank everyone for being here. Yeah, welcome back, protocol meetings. Hello, 2021! Thanks, everybody. Thanks. Bye, everybody.
diff --git a/meetings/2021-01-28.mdx b/meetings/2021-01-28.mdx new file mode 100644 index 0000000000..c678ff0bed --- /dev/null +++ b/meetings/2021-01-28.mdx @@ -0,0 +1,114 @@ +--- +title: "Clawback Flag Semantics and Visibility" +description: "This overview focused on CAP-35 (Asset Clawback), clarifying how the proposed clawback flag and operations work, how visibility and opt-in are enforced for asset holders, and what changes were required before moving the proposal into Final Comment Period." +authors: + - david-mazieres + - eric-saunders + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [legacy, CAP-35] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session continued the discussion on CAP-35, with the goal of resolving outstanding questions and determining whether the proposal was ready to advance. + +### Key Topics + +- Reaffirmed that clawback is strictly opt-in per asset: + - Issuers must enable the clawback flag before a holder accepts the asset. + - Existing assets and trustlines are unaffected. + - Clawback status is visible on-chain so holders opt in with full awareness. +- Clarified flag semantics and discoverability: + - New authorization flags and operations introduce clawback without altering existing authorization behavior. + - Wallets and explorers can surface these flags to warn users in advance. +- Reviewed the new `set_trust_line_flags` operation: + - Modeled after `set_options`, allowing individual flags to be set or cleared without redefining all authorization bits. + - General agreement to deprecate `allow_trust` over time in favor of the newer mechanism. +- Discussed design feedback and future flexibility: + - Using full asset identifiers instead of asset codes to preserve extensibility. + - Potential delegation of trustline authorization to non-issuer accounts in the future. + - Clarified that clawback returns assets to the operation source account (currently the issuer), rather than burning them. +- Examined edge cases and limitations: + - How claimable balances inherit clawback behavior from the sender’s trustline. + - Implications for payment channels and contract-like flows. + - Acknowledgement that the design involves trade-offs, but favors predictability and explicit opt-in. +- Meeting outcome: + - Minor wording and structural updates were identified. + - Consensus was reached to move CAP-35 from Draft to Final Comment Period, pending those changes. + - A formal notice would be posted to the Stellar developers mailing list, opening a one-week window for final feedback. + +--- + +### Resources + +- [CAP-35 – Asset Clawback (Proposal)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md) +- [CAP-35 Discussion Thread](https://groups.google.com/g/stellar-dev/c/hPhkXhrl5-Y/m/ZF6eJcqKAgAJ) + +
+ Video Transcript

[00:00] All right, everybody, welcome to the Stellar Open Protocol Discussion. Thanks for being here, and thanks to all the panelists. Just a reminder: the goal of these meetings is to talk about and plan for changes to upcoming versions of the Stellar protocol. Specifically, we go over Core Advancement Proposals, CAPs for short, which are open source specs that describe suggested new features designed to evolve the protocol to meet ecosystem needs. So, CAPs have a life cycle: they go through various phases, discussion, draft, Final Comment Period, acceptance, and implementation, before they're finalized, added to a major release of Stellar Core, and put forward to validators, who ultimately decide whether or not to accept the changes by upgrading the network. So actually, if all goes well today, we may attempt to move a CAP from draft to Final Comment Period. We'll see what happens; very exciting procedural stuff there. If we do, we'll post a notice on the Stellar dev mailing list, and there's a link to that in the meeting description,

[01:00] at which point anyone who has comments or questions or suggestions has one week to respond before we decide to either reopen the discussion or mark the CAP as accepted. So that's what happens if we move this CAP into Final Comment Period. Last time we started the meeting with personal introductions, but today I think we'll skip that, just to keep things moving along. All the participants are listed in the event description, so if you want to know who's who, just do a quick search, or look at the last protocol meeting, where a lot of the same participants were. There's also a Q&A box where you can submit questions. We probably won't go through them one by one during the meeting, but we do look at them; they do inform our discussions and decisions. So if you have a question about what we're discussing, please feel free to raise it there. Fair warning: this is a technical discussion about a specific proposal, and if you want to keep up, I suggest reading through the CAP, which is also linked in the meeting description; that will give you context. Now, today we will continue our discussion from the last meeting about [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md),

[02:00] which defines a method for allowing an issuer to claw back an asset in order to support regulatory requirements. It can be used, for instance, to recover assets that have been fraudulently obtained, to respond to regulatory actions, or to enable someone who can prove their identity to recover an asset in the event of key loss or theft. Now, remember I said we do actually pay attention to the Q&A? Well, based on some questions that came in last time, it seems like there's a bit of confusion about this proposal. So before we begin, I just want to give a quick overview to attempt to clarify a few things, based on questions that came in last time. This is what I want to point out: the proposal would introduce an optional feature for assets, and it does that by creating new authorization flags and new operations to take advantage of them. The goal is to add new functionality to Stellar that would make it easy to issue regulated assets. Now, these new flags will have no effect on accounts that already hold an asset. That means if clawback is not enabled

[03:00] when you obtain an asset, it can't be enabled for you; an issuer can't just come in and change the rules after the fact. Also,
when an asset has clawback enabled, that fact will be clearly visible on the issuing account and on the holder's account. In other words, a potential holder will know in advance that an asset has clawback enabled: they'll look at the asset, they'll see that it has clawback enabled, and if they decide to, they can hold it, and they do so fully informed, with eyes wide open. People opt into holding these assets, and that's a pretty important point. I mention all that just to clarify some questions that came up last time. Before we dig in, does anyone else have anything to add, or are we just ready to go? Cool. So, [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md). The current state of this is that it is in draft mode, and the question that I think we're ultimately trying to get to is: is it ready to move into Final Comment Period,

[04:00] pending acceptance? But there are a couple of changes that have happened since last time. There are some bug fixes that address comments, and there's a new set trustline flags op that allows an issuer to remove the clawback flag. So, just to open it up: does anyone have any comments or questions about the changes since we last met? So, first of all, I just want to congratulate the authors; I think it's gotten a lot better. I have, I don't know, three quick comments that maybe are just changes you can make or not, and then one point for more detailed discussion. I don't know how you want to go through this, if we should start raising points or go round robin or what. Why don't you start with the three quick points; let's just hear what those are. Okay. So, one, and again this is kind of trivial, but in this set trustline flags op, the clear flags

[05:00] and set flags are pointers, and I was wondering why that is. Is that for future extensibility, in case we have multiple banks of flags or something we want to turn into a union? Because otherwise, if you just have an integer, if it's zero, then you're not setting any flags or clearing any flags. So it seems like, with the optionality, there are redundant mechanisms to not do anything, and I'm just curious about the rationale. It matches what set options does, right? If you don't want to clear any flags, you can just set it to null, so I just followed that. I think your approach is fine as well, but I do think there's an advantage in keeping it the same as set options. Sorry, I thought that set options actually sets the thing to a particular value, whereas here we're actually setting or clearing bits, so it's different. Set options is the same as set trustline flags; I think you're thinking about allow trust, which sets it. Oh, right. So, yeah,

[06:00] the goal here was to make it similar to set options, in that you control specific bits. Okay, well, again, this is a pretty minor point; I raised it, and it sounds like there's a rationale, so it's not a huge deal. The other question: so, another question is, should we deprecate the allow trust op now that we have the set trustline flags op? I think so. I think we should, but that would be just something to mention; we don't have to remove it or anything, but we can just deprecate it, and slowly, eventually, maybe way down the line, remove some of these obsolete operations.
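For reference, here is a rough sketch of the set-or-clear-bits style being discussed, loosely modeled on the `setTrustLineFlags` operation that later appeared in the JavaScript Stellar SDK; the asset code and account IDs are invented placeholders (real code needs valid public keys):

```js
const StellarSdk = require("stellar-sdk");

// Hypothetical asset and holder for illustration.
const asset = new StellarSdk.Asset("REG", "GISSUER...");
const holderId = "GHOLDER...";

// New style: set or clear individual flags; anything omitted is untouched,
// so the issuer doesn't need to know the trustline's other flags.
const op = StellarSdk.Operation.setTrustLineFlags({
  trustor: holderId,
  asset: asset,
  flags: {
    clawbackEnabled: false, // clear just the clawback flag
  },
});

// Old style (allow trust): overwrites the authorization state wholesale,
// which is why it requires knowing the flags that are already set.
const legacyOp = StellarSdk.Operation.allowTrust({
  trustor: holderId,
  assetCode: "REG",
  authorize: true,
});
```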
So, a third quick point or suggestion: in the set trustline flags op, instead of having an asset code, should we think about putting an asset there? Let me make the case. It's not cut

[07:00] and dried, but there are advantages. Obviously, you might say, well, right now it's completely redundant, because the issuer should be implicit in whatever the source account is for the operation. However, I can imagine situations down the line where we might not want that to be the case, and here are two scenarios where having the full asset in there would make things easier. One is: maybe there are certain flags where the issuer can add the flag or remove the flag, but you can only add the flag, or something; so we actually want the owner of the trust line to be able to change some of the flags and the issuer to be able to change other flags, or maybe the owner can do it in one direction and the issuer in another direction. The second thing, which comes back to a point of feedback that I've gotten a lot from talking to people, is the annoying thing that if you issue a bunch of assets and you want a fixed quantity, and you lock the account by zeroing the high threshold, you can no longer rotate the keys that are used to authorize trust lines. And so, I don't think we want

[08:00] to fix this today, but one thing that one can imagine down the line is adding, like, a superuser flag to a trust line, which allows you to not only hold the asset but actually authorize other people to hold the asset. I realize maybe we don't want to do this; it means looking at another entry in the database when you're doing transaction processing, so there are arguments against it. But that would solve this problem: you could issue an asset, create a superuser account to manipulate trust lines, and then completely lock the original account, and you could still rotate keys in the other account. So just having the full asset in there would allow us to do that, because it would give us this level of indirection, where a different account could be the one authorizing trust lines. And just to clarify, you mean the asset code versus the full asset: the asset code, say USD, versus having USD colon the issuer of that

[09:00] asset? Exactly. And so the issuer is redundant, because it's always the source account, but I can imagine future situations where we don't want to require that to be the case. Actually, I would support that. In the original proposal, the clawback operation did not specify the full asset, because at the time we only had allow trust, but if we're going to make this change to the new allow trust, we would also make the change to clawback, right? And then we end up with something completely open, and in the future we can easily make that change without having to add new operations, basically. So yeah, I totally support that. I just want to echo this: I've been hearing from partners this desire to basically delegate the ability to allow

[10:00] trust to separate accounts. Right now we promote this idea that everyone needs to have access to the issuer account with a low threshold, but there are a lot of issues with this. So if the issuer can actually delegate this authority to other accounts, then I think that is great. Cool.
So I have one more issue that probably requires more discussion, so I'll maybe defer if other people have quicker things; we can come back to the longer-running ones. There's something I want to comment on first: I think this is really great, the idea that in the future we could possibly delegate who can run these operations, as opposed to it being only the issuer. I think that relates back in some ways to a previous conversation we had about clawback, and the fact that right now clawback is only going to return the asset to the issuer, with no ability to specify a destination. And I think that decision was + +[11:00] completely fine if we think about clawback as something only the issuer operates, because if they claw something back, they can always issue the asset again themselves. But when we start talking about making it flexible enough that a delegated account could cause the clawback, is the clawback still going to return to the issuer? No, in that case it would have to go back to the source account, which would be this other account. So I think it would be consistent with the behavior. Okay, and in fact in that situation you would not be burning assets; you would actually be moving assets from the account being clawed back to this other kind of account. So is the clawback mechanism currently set up to claw back to the source account? It's implicit, right? It's implicit, okay. + +[12:00] I mean, we could add a destination account. Imagine a situation where whenever you claw back, you actually want to send the funds to some arbitrator who's going to decide whether this is just or not, although really you'd want to force that, so maybe it's not such a good idea. I think it's better to have it go back to the source account. If you're the issuer, you end up burning it, and then if you want to send it to an arbitrator, that's a second operation, and then you have a clear separation in the history of what is actually happening. So it sounds like we do need to change the definition in the CAP slightly, just to be more clear that the clawback is returning the asset to the source account, and right now the source account is only the issuer. Because right now the language is more focused on clawback burning the asset rather than moving it. To be clear, in [CAP-35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md) it would not be + +[13:00] possible; it actually doesn't make sense to do clawback from a non-issuer account, because you don't have the right to do it. But I agree in terms of wording; I can say it that way, and it will just work. Adding this functionality would actually add a bunch of complexity that I don't want to take on in CAP-35, because you have limits and all that stuff that the issuer doesn't have, so you get new failure modes. So the suggestion is just to slightly tweak the language so that it's clear. While we're at it, I did notice one really nitpicky thing, which is that this new operation that was added recently, the set trustline flags op, does appear in the semantics but not in the abstract. So while someone's going to tweak that language, my request is: please add that to the abstract portion of the CAP.
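Backing up to the clawback semantics agreed a moment ago, a minimal sketch, again using the shapes the JavaScript SDK later adopted, with names treated as illustrative: the clawed-back amount is credited to the operation's source account, which today is always the issuer, so the effect is a burn, and forwarding the funds anywhere else is a separate second operation:

```ts
import { Asset, Operation } from "stellar-sdk";

const clawbackOp = Operation.clawback({
  from: "GHOLDER...",                    // account being clawed back
  asset: new Asset("USD", "GISSUER..."),
  amount: "100.0000000",
  source: "GISSUER...",                  // funds return here; issuer => burn
});
```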
+ +[14:00] I can take that. Cool. Does anyone have anything else before we take David's more potentially rabbit-hole question? I have one comment related to the suggestion to make the asset code an asset. In the original set trustline flags proposal there was a similar change that allowed either the issuer or the owner of the trustline to operate on the trustline flags, and one concern that John brought up was that he would prefer we just use a different operation if the owner wanted to modify the trustline flags, because he said it would be clearer than allowing this brand-new functionality, which we don't even allow at the moment. Personally, I don't think it + +[15:00] matters that much; I think it's fine if we enable the owner of the trustline to modify a flag in the future, but that is one concern that he had. So that's a decision we're not taking today. That may be perfectly fine; all we're saying is we're going to leave the flexibility. Otherwise we'd be making a decision today that we have to do it that way, and instead we're leaving our options open. That was only one of the two uses I could think of off the top of my head for having a full asset instead of the asset code, the other one being this delegation, which both Tomer and I have heard requests for from people. Okay, I think that makes sense. All right, David, I think we're ready for your bigger question. Okay. So the bigger question is, and I don't know what to do about this, and maybe it's not that big a deal, but when you're clawing back a claimable balance, + +[16:00] the problem is that whether or not you can claw back a claimable balance depends on the person who created the claimable balance, not the person who can redeem it, who can claim it. And this seems a little unfortunate in terms of, for example, how you arbitrate a situation where I sent you a claimable balance, and then I couldn't claim it anymore, so I considered it yours, but it somehow got clawed back before you were able to claim the money, and maybe you had some special arrangement where you didn't have clawback or something. So this is bad. Another place where I think this would come up a lot is at the end of some complicated protocol, like a payment channel, where at the end of the day I give you a bunch of assets and you give me a bunch of assets. I would expect those either both to + +[17:00] happen or neither, and now there's a possibility that the wrong one could get clawed back: my funds could get clawed back because you were subject to clawback, but you were the one who created this claimable balance. So given that you don't necessarily know who can claim, well, anyway, that's the problem. Should we do something about this, do we care, and if so, what should we do? I mean, to me that's actually why I wanted to have this flag on the balance entry in the first place.
Because if you don't have clawback enabled on the balance entry, it's basically immune to that kind of interference from smart contracts. For the ones that do have that flag enabled, I agree: as soon as you touch it as an issuer, you're actually kind of blowing up a whole + +[18:00] smart contract. But the problem is that it feels like in most cases you'll be blowing up the wrong leg of a smart contract. The problem is that you don't know. So you have to do a lot of offline work before actually doing this clawback. Can you explain to me what the use case would be here? How would you be using payment channels with claimable balances for assets that had clawback enabled? Is this something a reasonable person would be doing with these assets? Sure. Whether or not you want to use a payment channel on an asset should be independent of whether you can claw it back or not. But the point is that in most of these protocols you assume that the funds are yours once + +[19:00] you can claim them, and we're now slightly changing that situation, in that they could still get clawed back because of something bad the issuer did, even if the recipient has some kind of non-clawback agreement with the issuer. That would be on the recipient's account, right, on their trustline flags? So the sender and the recipient both have this clawback flag, which can be set or unset on their trustlines. If the recipient doesn't have it set on their trustline, then they know ahead of time that it can't be changed for them, right? Except that the claimable balance they can claim can still be clawed back, unless I'm misunderstanding. Right, but only if the sender had the flag enabled. So in a way, you wish the policy had to do with the recipient rather than the sender, I guess is what I'm saying. But that also doesn't work, because you + +[20:00] don't want me to be able to protect my funds by making a bunch of claimable balances that people can't claim and then reclaiming them afterwards. So I don't know what the solution is; it just seems slightly annoying. Recipients also don't have to exist at the time the claimable balance is created. Yeah, exactly, so there's no guarantee we even have that information. I think the only things we can guarantee exist on the network when the claimable balance is created are that the issuer exists and that the original account, the sender, exists. And if we were to choose one or the other to get that information from, the sender is better. Because if we use the issuer, then whenever the issuer enables clawback, any claimable balance a sender creates is going to become clawback-enabled, even though they may have an agreement with the issuer that when they hold the asset, it can't be clawed back.
So I guess my question is: where does this conversation lead, and what action needs to be taken to resolve this issue? I mean, maybe we can't resolve it. There doesn't seem to be any ideal solution here, and I agree that it's at least as reasonable to have the flag on a claimable balance be based on the sender as to have it be based on the issuer. So given that those are the only two options I can see on the table, maybe we just live with it. Does the spec currently specify using the + +[22:00] sender? I can't remember. Yes, everywhere. Okay, so that's the standard. The reason I wanted that in the first place was that if you're going to engage in a payment channel, at the beginning you can actually look at those flags that people have on their trustlines and decide at that point whether you want to take the risk in that payment channel. If people don't have the flag set, you know you're basically taking zero risk; if they have the flag, then you have a question. But the flag can change during the execution of a protocol, right? No, that's the point: if the flag is not set, the issuer cannot set it back; it can only be unset. Okay, that makes sense; that's why this works, + +[23:00] because if we allowed the issuer to renege on this, it would also break that. So anyone would understand this fact and would opt in to using the asset that way, again with eyes wide open. But the important bit here is that you can imagine having some accounts on the network that don't have that flag enabled even though the issuer has clawback enabled. Maybe they're privileged accounts, better tiers of accounts, basically, that can actually engage in those payment channels or whatever. I agree that it's not ideal, because really what you would want is for any account to be able to participate in payment channels. But the thing is, as soon as you potentially have this third party interfering, it gets really complicated. + +[24:00] So I can basically live with it; I think you guys have convinced me that this is probably fine and anything else is going to be worse. I also want to mention that I think this is a limitation of claimable balances in general: the only place they can go is into an account that's defined as a claimant. And so for their use in smart contracts, that limitation exists already, and maybe there's room for improvement in the future for claimable balances to have other destinations. You could take funds from a claimable balance and move them into another claimable balance, or into multiple claimable balances, and if that is ever a possibility in the future, it opens up more flexibility for smart contracts, because then we can start talking about how they can inherit + +[25:00] the attributes of their previous balances, or different things like that.
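The channel-entry risk check described above, inspecting counterparties' trustline flags before opening a channel, might look roughly like this sketch against Horizon's account endpoint; the `is_clawback_enabled` balance field is the name Horizon shipped with Protocol 17 and should be treated as an assumption here:

```ts
import { Server } from "stellar-sdk";

const horizon = new Server("https://horizon.stellar.org");

// Returns true/false if the trustline exists, undefined if it doesn't.
async function clawbackRisk(accountId: string, code: string, issuer: string) {
  const account = await horizon.loadAccount(accountId);
  const line = account.balances.find(
    (b: any) => b.asset_code === code && b.asset_issuer === issuer,
  );
  return line ? (line as any).is_clawback_enabled === true : undefined;
}
```

Because an unset flag can only stay unset, a `false` answer here holds for the life of the protocol, which is exactly the monotonicity being discussed.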
But that's a bit off topic. I do think there is some limitation we're inheriting from the fact that claimable balances only have accounts as a destination. Okay, well, I'm satisfied; nothing needs to change in the CAP after this last part of the discussion. So there's basically one minor change, which is to replace the asset code with an asset. And for the other one we agreed that we'll add a note that the allow trust op is deprecated, and in two places we'll turn an asset code into an asset, right? That's correct. I think those are the changes we've suggested making; does that seem accurate to everyone else? + +[26:00] Yeah. And if so, correct me if I'm wrong, Sid, those feel like pretty minor changes to execute, is that correct? Yeah, they should be. So we could move this into Final Comment Period pending those changes; does that seem correct? Yeah. All right, I say we move this from draft into Final Comment Period. I kind of want to do a formal process here where we have to say aye or nay, so let's do that. Nico? Yes. Sid? Yes. David? Yes. Tomer? Yes. Eric? Yep. Is that everybody? I think that's everybody who's on this call. I actually have a gavel, so I'm going to strike this gavel, can you see it, and now it's officially moved into Final Comment Period. + +[27:00] I happen to have that gavel in this room full of junk; I did not go buy it especially for this, I promise. And that's mostly it. The only other thing we could do, which we can also decide not to do, is talk a bit about where this CAP is, because I know some implementation has already been done. Does anyone want to walk us through that, or should we just be happy with the procedural vote we just had and close the meeting? I think that's probably enough for now in the context of this meeting. Okay, cool. Meetings that end early are great; there's just no reason to stretch out to the full time if you're done. So we're going to move into Final Comment Period pending those changes. When they're done, I will send out the official notice on the Stellar dev list, and for anyone who's watching, that gives you a week for final suggestions; we can reopen the discussion if it seems necessary. But this is Final Comment Period pending acceptance. Great. Well, we answered the questions, we + +[28:00] moved this along, I think we're done. Everybody feel good? Great. Thanks everyone for watching, see you next time. Cheers. + +
diff --git a/meetings/2021-03-11.mdx b/meetings/2021-03-11.mdx new file mode 100644 index 0000000000..641d78ca2c --- /dev/null +++ b/meetings/2021-03-11.mdx @@ -0,0 +1,162 @@ +--- +title: "Generalized Preconditions and Payment Channel Safeguards" +description: "This overview focused on CAP-21, which generalizes transaction preconditions to support relative timelocks, safer payment channels, and more flexible sequence-number handling. The discussion explored design tradeoffs, implementation concerns, and next steps before prototyping." +authors: + - david-mazieres + - eric-saunders + - jonathan-jove + - justin-rice + - karen-chang + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [legacy, CAP-21] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This meeting revisited a revised draft of CAP-21, aimed at expanding Stellar’s transaction preconditions beyond absolute timebounds. + +### Key Topics + +- How generalized preconditions extend `timeBounds` into a richer model using relative time, ledger gaps, and sequence-number ranges. +- Why relative timelocks are critical for payment channels, enabling “disclose then finalize” flows, dispute windows, and unilateral recovery without excessive delays. +- Improvements to sequence-number handling: + - Allowing transactions to be valid over a range of sequence numbers. + - Reducing brittleness for pre-signed transactions, payment channels, and multi-server submission workflows. +- Broader use cases beyond payment channels: + - Time-delayed account recovery. + - Key recovery and safer automation. + - Better interoperability with protocols that rely on relative timelocks (e.g., Lightning-style designs). +- Design and implementation considerations: + - Tradeoffs between simplicity and extensibility in precondition structures. + - Forwarding and queuing complexity in Stellar Core. + - Agreement that the proposal meaningfully simplifies previously complex patterns. +- Outcome: + - General consensus that the revised approach is sound. + - Follow-up work identified around implementation details and further prototyping. + +### Resources + +- [CAP-21 – Generalized Transaction Preconditions (Proposal)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) +- [CAP-21 Discussion Thread](https://groups.google.com/g/stellar-dev/c/N8vzP2Mi89U/m/5SE67XxbAQAJ) +
+ Video Transcript + +[00:00] Hello everyone, welcome to the Open Protocol Meeting, which is being live streamed. For all of you watching at home, I just want to give a quick overview: the goal of these meetings is to talk about and plan for changes to upcoming versions of the Stellar protocol. So we go over Core Advancement Proposals, aka CAPs, which are open-source specs that describe new features designed to evolve the protocol and meet ecosystem needs. The CAP life cycle begins with a draft, and today that's what we're discussing, specifically a draft of [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md), which is titled Generalized Transaction Preconditions. This CAP has actually been kicking around for quite a while; it was first created in May of 2019. But with the recent addition of claimable balances, which were introduced in Protocol 15, some new possibilities opened up, and this week David Mazières has been adapting the original proposal with those claimable balance possibilities in mind. So fair warning, this is a + +[01:00] technical discussion; if you want to follow along, I suggest reading the CAP and the recent developer mailing list thread about it, both of which are linked in the event description. Just as a quick overview of [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md): it generalizes the timeBounds field in transactions to support other conditions, including conditions that relax sequence number checking and that provide relative timelocks. It does that by extending the account entry to keep track of the time and ledger number at which an account's sequence number was last changed, and it replaces the timeBounds field of the transaction with a union that allows more general transaction preconditions. The goal is to advance network scalability by facilitating off-chain payment channels, and that's sort of what we're talking about today: are there questions we can answer, and will this CAP help create those payment channels? It's also to advance security, simplicity, and interoperability with other networks by enabling relative timelocks, and finally it should make it easier for + +[02:00] developers to create highly usable products by enabling time-delayed key recovery. So today is the first discussion of this newly revised CAP, and part of what we're trying to do here is really just get a sanity check before we start prototyping anything. Does anyone have anything they want to add here at the top? Yeah, as well as the sanity check, I think there are two questions: one is whether the overall approach is sane, and the other is that Nico has raised some points, many of which are things one could go either way on, and it'd be nice to see what the consensus is on where we should go. Great. And since this is something you've been working on, do you want to start out with any questions you might have, or pick a point where you think the discussion should start, and we can go from there? Yeah, thanks, I think your summary was pretty good. I guess the things we could discuss are, one, does this seem useful for + +[03:00] a bunch of different scenarios? Because if you have one mechanism that supports a bunch of different things, then that's good.
So I've got four examples of things that you can do with this. If people accept those, that's great; if people have questions, that's also great; and if people have other things, like "could it do this?", that's great too. I was very grateful that Leigh suggested the one-way payment channel, and maybe that isn't perfect, but the fact that I was able to cook something up pretty quickly using this mechanism to do something that hadn't been one of the things we'd anticipated was an encouraging sign, I would say. And then the other thing is the sort of nitpicky technical questions that we should get consensus on. So where should we start: applications and uses, or just dive right into the nitpicky questions? David, can you give a high-level overview of why these things are relevant for payment channels? Ah, okay, good. + +[04:00] The kind of overall architecture of a lot of payment channels, and frankly a lot of other higher-level protocols on blockchains, involves a two-phase thing where you want someone to be able to recover unilaterally from a failure if the other party goes away or starts becoming non-cooperative, but you don't want people to be able to do something invalid. So there's one phase where you disclose that you're going to do something, and then there's a certain amount of time in which someone else can object if you're doing something that's not quite right, like trying to close a payment channel with an old closing state when there's a more recent closing state. But if nobody objects, then after that time delay you're able to close the channel. So this pattern of disclose and then finalize is extremely important in a number of contexts. Without [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) it is possible + +[05:00] but extremely tricky to do on Stellar, and you have to pay a time penalty: for example, when someone fails, in a payment channel you might lose access to your funds for twice as long, because the only way to do it is with pairs of transactions that have non-overlapping absolute times in them, essentially. We've done stuff like this before; Starlight, a long time ago, was a proposal that used this approach, and it just gets super complicated. So the idea here is: let's just provide as a primitive the thing you actually want, which is the ability to do something after some relative delay. And since we already have this notion of accounts and sequence numbers, and since sequence numbers are extremely useful for invalidating a bunch of stale transactions (you can just give them successive sequence numbers), it seems like the simplest way to address this is to just + +[06:00] have a transaction precondition that says: you can do this transaction, but only when the account has been idle for a certain amount of time. Therefore, if the account gets changed when someone discloses that they want to do something, they have to wait that amount of time before they can actually do the thing, and in that time somebody else can object by raising the sequence number further and invalidating whatever they were going to do to close the channel. So that's kind of what led to this proposal.
So it might be worth mentioning that the Bitcoin protocol has relative timelocks; they added them specifically to enable the Lightning Network, just as a reference. But David, you were talking about relative timelocks, and your proposal is actually much bigger than that and also includes some sequence number changes; maybe you can talk a bit about that and why it's relevant. Yeah. + +[07:00] So the sequence number changes are because our sequence numbers are basically extremely brittle and hard to use in protocols, because you have to have the exact sequence number in order for a transaction to run. One consequence of this is that if we don't have [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) and you're implementing payment channels, the disclosure transaction that sets you up to close the payment channel needs to be signed on multiple source accounts; you need multiple versions of it, because if you submit yours but it's the wrong one, then I still need to submit one, and it needs to be on a different source account because you've already burned the sequence number of yours. Another example is that you want to set up a pre-signed + +[08:00] transaction that you can submit at any time to do something, like clean up some mess, and again, that's only valid if the account is at a particular sequence number. And so, without, I think, overly complicating things, I added a feature where you can optionally say: by the way, this transaction is valid for a range of sequence numbers. Whenever you execute a transaction, you always leave the account in a state where the invariant is: the transaction is only valid when the sequence number of the transaction is greater than the sequence number of the account, and after you've executed a transaction, the sequence number of the account is the sequence number of the transaction you just executed. Until now it's also always been the case that the sequence number of the account has to be exactly one less than the sequence number of the transaction in order for the transaction to be valid. + +[09:00] With this new optional feature, you can add a different minimum sequence number, so you can say: actually, I want this transaction to be valid for any sequence number below the transaction's, or for the last hundred sequence numbers. And so this makes transactions that you've signed ahead of time a lot less brittle. It seems to come up in all of the proposed applications. In payment channels it's useful because I can submit a disclosure transaction, and if it's the wrong one, you can submit a disclosure transaction, and they can both just be on the escrow account; we don't need special other accounts just for the sequence number, because an earlier disclosure transaction doesn't invalidate a later one, since the disclosure transactions are signed to accept a range of sequence numbers.
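As a plain-TypeScript sketch of the disclose-then-finalize pair David describes: no SDK existed for these fields at the time, so the field names below just follow the draft informally and are assumptions:

```ts
interface TxSketch {
  seqNum: bigint;     // the transaction's own sequence number
  minSeqNum?: bigint; // valid for any account seq in [minSeqNum, seqNum)
  minSeqAge?: number; // seconds the account's seq must have sat unchanged
}

// 1. Disclosure: bumps the escrow account's sequence number, putting the
//    channel on notice that a close has been proposed.
const disclose: TxSketch = { seqNum: 101n, minSeqNum: 0n };

// 2. Finalize: only valid once the account has been idle for the dispute
//    window, giving the counterparty time to object with a newer state.
const finalize: TxSketch = { seqNum: 102n, minSeqAge: 24 * 60 * 60 };
```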
Another place where this + +[10:00] might come up is where you want to submit large numbers of transactions on one account: you have a farm of 100 servers that are all submitting transactions on the same source account each round, and using the new feature you can accept gaps in the sequence number space, so if one server happens to be down or doesn't produce a signed transaction in time, that's okay, you can just skip over its transaction. Another example I had was recovery: if you want a friend to be able to help you recover your account in case you lose your keys, you want to pre-sign a transaction that you give to them, which they can just submit and which will add their signing key to your account. But of course you don't want them to be able to do that immediately; you want to have some chance to object in case somebody steals their key. And so again, you + +[11:00] want one transaction you can just submit that will disclose that this is going to happen and bump the sequence number all the way up to the point where your friend can take over the account, and then a second transaction with a relative timelock that allows your friend, after a few days or whatever, to take over that account and help you regain access to your funds. Can you guys hear me? Yes. Excellent. One thing that I wanted to ask about was how you anticipate this interacting with claimable balance IDs, which are sequence number dependent. Is the anticipation that when you do one of these range-bounded transactions, the sequence number is bumped to the last sequence number prior to the operations being applied, so that no matter where in the range it was applied, any claimable balance generated would always use an ID that is equal to the actual sequence number + +[12:00] of the transaction? So there should be no change: the definition of the operation ID still applies unchanged, in the sense that every transaction still has a sequence number, and it always leaves the account with that sequence number when it's finished executing. The difference is simply that the transaction may be valid even if transactions with the prior sequence numbers have not executed yet. So just to make sure I'm 100% on the same page with you: if the sequence number bounds were minimum one, maximum three, then whether I applied the transaction at sequence number one, two, or three, the claimable balance would always be created as if it was at sequence number three? I think what you're saying is technically correct, but that's maybe not the clearest way to view it. I just view it as: there's no maximum sequence number, there's just the sequence number, which is the sequence number of your transaction, and then there's a minimum sequence number that says when your transaction starts being valid. The minimum sequence number, by + +[13:00] default, if you don't specify it, is just one less than the sequence number of the transaction, meaning every transaction can be submitted at exactly one sequence number. But you can reduce that minimum sequence number all the way down to zero if you want.
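The validity rule being described reduces to a small predicate; this is a hedged restatement in TypeScript, not normative CAP text:

```ts
type Tx = { seqNum: bigint; minSeqNum?: bigint };

// Default minSeqNum is seqNum - 1 (the classic "exactly one below" rule);
// an explicit minSeqNum widens validity to a range. Executing the
// transaction always leaves the account at exactly tx.seqNum.
function isSeqValid(accountSeq: bigint, tx: Tx): boolean {
  const min = tx.minSeqNum ?? tx.seqNum - 1n;
  return min <= accountSeq && accountSeq < tx.seqNum;
}
```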
The reason I'm asking about it specifically in this way is that the language is phrased as "when the transaction finishes executing, you will be at such-and-such sequence number." I actually think the phrase you want to use is "when the operations start executing, you will be at such-and-such sequence number," because otherwise it's quite ambiguous which sequence number you're at when you're actually doing these operations, and also it's ambiguous what a bump sequence means. Okay, let me address that; there are several points. The way I was viewing it is that the operation ID in a claimable balance depends on the actual sequence number of the transaction. The reason is that I was assuming + +[14:00] that you execute transactions in two phases: in one phase you claim all the fees, bump all the sequence numbers, and validate the sequence numbers, and then in a second phase you do all the operations, including things like bump sequence. It is currently the case that, for example, you could submit two transactions in the same block, and depending on the order in which they execute, one of them could bump sequence to invalidate the other, if they have different transaction-level source accounts. That behavior is sort of unchanged, in that all the validation and checking is still happening before bump sequence. But regardless of bump sequence, forget [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) for a moment: when you're generating an operation ID, it's still based on the sequence number of the transaction, in the sense that if you have a bump sequence, that's not going to affect the operation IDs, right? I'm actually + +[15:00] trying to confirm that right now. I was under the impression that it was based off the actual sequence number of the account at the time it executes, and that would make it difficult to predict the operation IDs, which I think might be a problem with claimable balances if that's the case. So that might be something independent of [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) that we might want to revisit regardless, because you don't know the order in which transactions on different source accounts are going to execute in the same block. You could have a situation where a transaction creates some claimable balances, and then, because some other transaction happened to call bump sequence, the claimable balances get a different op ID. Oh, but no, the sequence numbers are strictly enforced at the time we + +[16:00] apply the transaction. David is right, though: it actually comes from the transaction sequence number, not from the account sequence number. And given that's the case, [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) doesn't change that. Every transaction still has a unique sequence number; it's just that we can now skip some sequence numbers, and we're a little bit more flexible about when a transaction runs.
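A conceptual sketch of why the IDs stay predictable: per CAP-23, the ID preimage hashes the transaction's source account, the transaction's own seqNum, and the operation index. The string concatenation below is purely illustrative; real Core hashes an XDR structure:

```ts
import { createHash } from "crypto";

function opIdSketch(txSource: string, txSeqNum: bigint, opIndex: number) {
  // Only transaction-level values go in, never the account's live sequence
  // number, so a minSeqNum range doesn't change the resulting ID.
  return createHash("sha256")
    .update(`${txSource}|${txSeqNum}|${opIndex}`)
    .digest("hex");
}
```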
I have kind of a basic question, which is: what was the original rationale for sequence numbers? Replay prevention. Because we don't have UTXOs, we just need to make sure that a valid transaction cannot be submitted multiple times to, you know, double the payments. The reason I'm asking is that I could foresee that if this is a feature for convenience, maybe many people would use it so they don't have to think about sequence numbers, and although that's a choice they've made, maybe it actually makes their implementation less secure. + +[17:00] Well, no, they still need to think about it; it still prevents replay. But one thing this allows, for example: let's say you have a fairly low-volume account where you're not doing a lot of payments. You could decide that rather than query for the latest sequence number of the account, you could just use the Unix time epoch as the low 32 bits of your sequence number, and now, if you set your minimum sequence to zero, that would just work. Obviously it would not always work; you would get some transactions rejected if you happened to generate transactions on two different machines at exactly the same time and use the same sequence number. But if that's unlikely to happen, then maybe that's a better trade-off in some situations, and we would allow it. And just following up, to make sure it's super clear to you, Eric: this wouldn't let you ignore sequence numbers. Imagine you wanted to say, hey, I'm going to set the minimum + +[18:00] number to one and the maximum number to int64 max, and that's not going to cause any problems. That's not going to work; there's no maximum number, exactly. Please don't use the term maximum number. It would take you straight to int64 max; you can do this one time, and now your account is stuck. I suppose we should maybe add the limitation the same way bump sequence doesn't let you bump to int64 max, because we have some idea that we want to be able to delete the account and then recreate it and not have old transactions be valid. Bump sequence does actually let you bump to int64 max; the account is just stuck at that point and you need to use another one, because we can't delete the account. Okay, so fine, we can just allow the same thing then. So is one way to think about this that it's a bit like a normal transaction with a bump sequence built into it? Yeah, but with a slight difference: the bump sequence operation happens later in the processing of a block, and this happens + +[19:00] earlier, during the initial phase of collecting fees and validating transactions. But yeah, it acts a lot like your sequence number implicitly being a bump sequence now. So, there are also some specific technical issues that Nico raised on the Stellar dev mailing list, and I want to make sure, if they're important, that we have time to get to them. Does it seem like time to move on to those, or are there still more high-level or general questions? Let's get specific.
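David's low-volume-account trick, as a sketch; the assumptions are that seconds resolution is enough and that a collision just gets a transaction rejected, never replayed:

```ts
// Derive a fresh, monotonically increasing sequence number from wall-clock
// time instead of querying the account, and accept any starting point.
function timeDerivedSeq(): { seqNum: bigint; minSeqNum: bigint } {
  const epochSeconds = BigInt(Math.floor(Date.now() / 1000));
  return { seqNum: epochSeconds, minSeqNum: 0n };
}
```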
Okay, so the first one is something I feel strongly about, but it's not the hill that I want [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) to die on, so maybe we end up deferring it to some later CAP. The way we do extensions for unions, by + +[20:00] dangling more and more nested structures off the end of a structure, is kind of messy and gross, and it seems like the reason we're doing it is just that we don't have enough engineering resources to fix each individual SDK, in a way that frankly wouldn't be that hard if we had someone living and breathing that SDK. So I think it's much better to have your extensions to unions be like an outer union, so you can keep improving the data structure and keep most of the fields the same, as opposed to having many nested unions, which both takes more bytes to encode and is more of a pain to program to, because you have all these nested unions. So I'm suggesting that for the account entry extension, we replace the thing that's currently a v2 dangling off of v1 + +[21:00] with just a v3 that has everything in it. And this isn't even transaction state, this is ledger state, so by the time you're talking about software that's actually parsing these ledger entries, we're already talking about pretty advanced software, not random end users doing XDR parsing, because it's the actual blockchain state as opposed to transactions. By the way, this was the last thing on the list; in terms of priorities I don't know why we're spending time on it. Okay, why don't we defer this to the end, then. So the next question, and this is kind of a more general point, but it's coming up here too: we're using a combination of signed and unsigned integers + +[22:00] for durations and time points, and it would be super nice if we could unify that across everything. I don't have a strong opinion on which, because 2 to the 64 and 2 to the 63 are both very large numbers of seconds. John has suggested that it's useful to do queries in SQL, and SQL doesn't have good support for unsigned 64-bit integers. Not just SQL, other languages too. Sure. So what we could do is change time point to be a signed 64-bit integer, have duration be a signed 64-bit integer, and just say that any time bound that is negative is treated as int64 max, basically, for backwards compatibility. And nobody is going to care, if they've already signed a transaction, whether the time bound is 2 to the 63 or 2 to + +[23:00] the 64 seconds from the epoch, right? I mean, a priori that sounds fine to me; I'd have to look carefully through everything and make sure it doesn't break anything terribly, but a priori I agree, nobody cares if something's going to happen in the year 2200, or, I don't know, 2 to the 64 must put you into the year 30,000 or something. No, it's got to be much more than that, because 32 bits already gets us to 2038 or something, right? So 64 bits is way beyond that.
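The backwards-compatibility rule David proposes is tiny; a sketch, with INT64_MAX standing in for "no upper bound":

```ts
const INT64_MAX = 2n ** 63n - 1n;

// A pre-signed transaction whose max time bound now parses as negative
// (i.e., it used the high half of the old unsigned range) is simply
// treated as having no upper bound.
function normalizeMaxTime(maxTime: bigint): bigint {
  return maxTime < 0n ? INT64_MAX : maxTime;
}
```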
Yeah, you're totally right, heat-death-of-the-universe kind of thing; I think we've got bigger problems to worry about at that point in time, even changing crypto. Yeah, exactly; I think our problems are stacked 25 deep by that point. Okay, so where we're landing on that one: it sounds a priori okay, but we should go to signed + +[24:00] 64-bit, and we just have to be a little careful about legacy, doing it in a way that doesn't mess up legacy transactions. Okay. So another question. Again, I just like doing things nice and simple, and I figure we already have unions: we've already got two kinds of preconditions now, no preconditions, time bounds, and then these general preconditions. If we want to add more stuff, we could just add a new kind of precondition. But an alternative would be to turn this into a little language, like the claimable balance predicates, having an array of different things. Again, the proposal could work either way. I like this way because it's simpler, but if + +[25:00] there's overwhelming desire to go the other way, we can do that as well. I guess one concern... you can go first, Nico. I was just waiting to see if anybody wanted to go first. I was thinking that if we just do this pattern without the array, it would just end up growing as we add more conditions. Even with the current version, you always use almost the maximum size byte-wise, and that doesn't seem like a good property. In terms of complexity, we can keep it fairly simple: I would say the array has to be strictly ordered by value, for example, so that you have a normalized + +[26:00] form; you have to put, say, time bounds before sequence number, things like that. And if you did that, could you expand the possibilities? Yeah, that's it. I mean, this thing is like 20 bytes, right, if it's not doing anything. Let's see, there's one, two, three, four, five things, so like 20 bytes, or maybe 24, because the duration is 64 bits. I can make it work the other way; it'll be more complicated to implement everywhere, there'll be more weird edge conditions, and I think in the end maybe you'll shave eight or 12 bytes off the size of some + +[27:00] preconditions, and others will be larger, maybe four bytes bigger or the same. To me it's the "it's called generalized but it's not generalized" type of thing; we want it to be generalized, a place where we can add more and more conditions. And then what if I called it something less arrogant than general preconditions, like relative preconditions v1 or something? I guess you asked for it, because it's like when people call a function "smart something" and it's not smart, right? No, I'm just saying,
I don't necessarily think it's a problem; I think it's more a question of what pattern we want to have here, and I can see that fairly soon, maybe not in the far future, some new conditions will pop up, and do I want to + +[28:00] ... it goes back, I guess, to this other point about how we do extensions: how much pain do you want to have later. And I guess here I just don't agree that the pain needs to be very high; I just think there's kind of a bug in our ecosystem in that we make these union upgrades more painful than they should be. For me, what I'm looking at here is: if we think this is a pretty reasonable representation of all the things we'll need in the near future, near future being, let's call it, the next year... I mean, we lived with just time bounds and single sequence numbers for, I don't know, much longer than that, five years I guess. So if we think we're going to make it that kind of length, I don't feel any need to go super crazy making this complex and extensible. But I remember seeing, I don't know, maybe + +[29:00] it was in the actual proposal, maybe it was in the notes for this meeting, a discussion about whether we should be implicitly AND-ing, which is the way it's currently written (you need to meet all the preconditions), versus OR. If we care about OR, then it probably warrants something more complicated, but I think OR might also put you into territory where people aren't able to understand what the preconditions mean. Yeah, there are certainly foot guns and such. First of all, I actually haven't yet come up with an example where you need OR; that's not to say they don't exist, and I would appreciate people speaking up if they have any such examples. You can also do OR by just signing multiple transactions. Yeah, that's not impossible. And especially because, to the + +[30:00] extent we want to go crazy, we can do that with claimable balances, right, we already have a little OR there. We could try to unify the languages, somehow have a notion of precondition that can apply to both claimable balances and regular accounts, and once we start doing this I'm just seeing the thing spiral out of control. And we're not saying we can never do this; we can add a new thing to the union when we decide this version is done. But let's just try to be good and not perfect here, because what we have is simple, it's easy to understand, I think it's not going to be too bad to implement, and I think it will already enable a huge number of use cases. I mean, as Oleg was pointing out this morning, in order to make Lightning really work on Bitcoin, they had to go through like four different soft forks or something. So maybe it's not perfect, and + +[31:00] maybe we'll want to make other changes, but we have that ability because we have unions. I think this claimable balance point is worth talking about for two minutes, though: do we think we're going to want to add this kind of thing to claimable balances? I'm asking because I'm feeling the pain of claimable balances in Horizon right now,
and how we wish we'd made the IDs more general, because we built everything around accounts, but it turns out claimable balances have similar properties to accounts. I mean, the thing the predicates on claimable balances have is that they're less monotonic than this stuff, kind of intentionally. Things like duration, sorry, min seq age, and min seq ledger gap and so on have a monotonic property: once they're valid, they'll stay valid. There's still a maximum time bound, because we're not going to get rid of that, and the ledger bounds. But I think that monotonicity makes it a lot + +[32:00] easier to reason about, when you're forwarding multiple transactions on the same source account, whether they can all be in the same block. We don't want situations where someone can waste a bunch of bandwidth by causing validators to forward around invalid transactions, and the monotonicity helps with that as well. Once you start adding in all these other operators, you no longer have that, and it just gets more complicated to reason about these things. Nico and John, any pitfalls you see in implementing this in Stellar Core, whether it's an SDF team or someone in the ecosystem trying to prototype it? For which part, the conditions? I guess the only + +[33:00] thing... so in terms of the CAP itself, at apply time, I don't think there are real problems from what I see; it's a fairly simple change, actually. I'm still trying to really wrap my head around the coupling, I guess, in some of those things. For example, the limitations on the flooding that David added last night, which I read this morning, in particular the ones related to relative timelocks themselves: I think this really only works if you basically do the same type of setup, where you have two transactions and one kind of allows you to + +[34:00] gate the disclosure and then the actual action, right? You always have to do this, and in a way it kind of bothers me a little that we have these constructs and they don't work independently; one actually requires the other. Sorry, but that's the point, right? Performing the action should require the disclosure. I understand, but in terms of the construct, you have this construct that cannot be used outside of the combination. Normally we try to make things, like for example the sequence number feature, that can be used in any other sort of context, whereas the relative condition has to be used with sequence numbers, actually with other transactions; it has to be paired with some other scheme, + +[35:00] otherwise it's a foot gun. People will maybe not realize, and it's actually a fairly subtle thing, that if some evil person is constantly pushing transactions in front of you, then your relative time keeps moving,
and it's always not where you want it to be, right? There are other patterns where you could use it, though. You can imagine: if my account is idle for a year, I want my family members to be able to access the funds, or something like that. So you could do it as a kind of trigger for idleness; it doesn't have to be paired. But it seems like most of the cases do involve this pattern of disclose and then act. + +[36:00] I think it's a very useful pattern, right? No, the pattern is useful; what I'm saying is more, I guess, that you gave an example where this is useful in a context where you don't pre-sign many transactions, and maybe that's okay, I don't know. Actually, I thought I understood your point, but now I'm confused. I thought your problem was that it's kind of inefficient, because it prevents you from pipelining back-to-back transactions or something. No, it was not an efficiency question; it was more about a potential attack: you pre-sign a bunch of transactions, and then you expect that last one to just work, but you don't realize that maybe there are ways to touch the account, + +[37:00] basically. Take the example of a payment channel: if the sequence number condition was not working in a way that removes all the older transactions, you would end up in a situation where the timestamp would keep moving from all those transactions, so you'd end up basically invalidating your latest one, and that's not the one you want to invalidate. Well, you're not invalidating it, you're just delaying it; you're worried about something like a livelock situation where someone has a low signing threshold on the account and they keep doing exactly this kind of stuff, right? Yeah. I don't know, from my perspective, if you're doing pre-signed transactions, you'd better know what you're doing, or you're probably going to screw yourself up no matter what the pitfall is. + +[38:00] But you can imagine a day where, say, someone provides a service that does pre-signed transactions, where some of that difficulty would be concealed. I still think [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) is only going to make that less error-prone, because it makes sequence numbers less brittle. There are things you can do that would be weird with this, but they would be weird anyway. You could have a situation where someone has a low signing threshold on the account and they keep bumping the sequence number, so my idle condition never kicks in and I can't execute my transaction. But they could also use bump sequence to bump the sequence number to int64 max and lock the account, too. So you're already giving someone the power to shoot you in the head, and then, I guess, be happy if they + +[39:00] only shoot you in the foot instead of the head, or something.
So can we just accept that, yes, there are ways to... I mean, yeah, like I said, it was more of a concern that we are adding more complicated things that you have to know how to use exactly right. I guess maybe that's what John is saying: these are to be used only if you really understand what you're doing. In a way, when I look at the actual payment channel protocol, it's actually super easy to understand; it takes five minutes to actually get it right, and it's correct, unlike previous attempts. So to me this is the big win. + +[40:00] Tomer had asked about implementation challenges, and I agree with Nico that the apply-time part of it should be super easy: a couple of new if-conditions, a couple of fields to update, but there's not really any logic there. I think it's actually probably the forwarding logic; that's what I was going to say too. The forwarding logic is a little more complicated, but I think, after I thought it through and wrote that new section last night (by the way, thank you, Nico, for pointing out that it should have been in the first version of this), it's not too bad. The main challenge is just that the forwarding logic is already complicated, there are lots and lots of conditions on conditions, and the main new challenge here is the additional bookkeeping burden of things that used to not be possible. For example, you receive a transaction that's seq num three to seven, or min seq three, seq + +[41:00] num seven, as David would prefer I specify it, and then the next thing you receive is min seq four, seq num six, and now you can actually add that to the queue. So it's all about whether our data structures allow us to do these things efficiently. Yes? No? I'm not sure; probably not, actually, right now, so we'll probably have to redesign that stuff to make those things fast. But it's probably within the realm of doable, just the type of thing where we'll have to do a kind of detailed study of what we actually have to do before we touch anything. Lots of positive feedback here; this is exciting. Okay, so in terms of my to-dos, the things I should revise here: well, I guess we never really resolved the union thing. + +[42:00] If I don't want to predicate this on an argument about unions and ledger state, I should just dangle the extension v3 off of v2, which is not my favorite, but whatever, I'll just do that if I have to. It sounds like there is consensus that we should have duration and time point types and that these should be signed throughout, so I can change that in this CAP. And then it sounds like there's also reasonable consensus that it's okay to stick with just one big structure with all the preconditions, as opposed to trying to build a little language like for claimable balances. And John, you were saying you would need to do a detailed study to figure out whether the data structures would allow us to do these things efficiently.
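John's queueing example, worked through with the same validity rule sketched earlier; this is illustrative only, not Core's actual data structures:

```ts
type Tx = { minSeqNum: bigint; seqNum: bigint };

const canRun = (accountSeq: bigint, tx: Tx) =>
  tx.minSeqNum <= accountSeq && accountSeq < tx.seqNum;

const a: Tx = { minSeqNum: 3n, seqNum: 7n }; // arrives first
const b: Tx = { minSeqNum: 4n, seqNum: 6n }; // arrives second

let seq = 4n;                 // account's current sequence number
console.log(canRun(seq, b));  // true: b can run first...
seq = b.seqNum;               // ...leaving the account at seq 6
console.log(canRun(seq, a));  // true: a is still valid afterwards
```

With exact sequence numbers, b could never have been queued behind a; the range makes both orderings admissible, which is the new bookkeeping burden being described.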
Does it make sense + +[43:00] to also start doing that, or is that something that should happen after these revisions? Yeah, I mean, our general goal is to try to do more stuff in parallel than we used to, so we can go faster; we have a lot of stuff to do. So I don't know when that's going to get onto our schedule. David, my guess is David will beat us, but in theory we can start that whenever. At this point I would say maybe Nicolas dissents, but there's nothing blocking us from doing that, and we've been doing a lot of work on that code recently, so it's very fresh in our minds. So it actually could be a pretty good time to do it, or to write a quick, perhaps dirty, prototype + +[44:00] and obviously, if we accept [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) in the future and merge changes, we'll need to optimize, but we can think about it. Let's do that first. Cool. This CAP is premised on the idea that it actually solves payment channels, though, right? So if we had another solution to payment channels, we'd have to revisit whether this is something we want to do. I mean, we do have other solutions; they just have undesirable properties, right? So if this is the best solution for payment channels, it would go ahead, but if it wasn't, then I guess we'd re-evaluate whether it was still worth doing. Is that right? Yeah, or we would tweak this, because it seems unlikely that a good solution for payment channels would not involve some kind of relative time lock. It could just be that there's some other feature we need to add, or some small tweak that we need here. So yeah, definitely, feedback + +[45:00] and challenges. Again, it was great that Leigh came up with this idea of the one-way payment channels with different trade-offs, so maybe I'll keep thinking about that too, and see if we can push on that to do top-ups and stuff in there. Yeah, and what I imagine, actually, is that there might be more work to do as people think in more detail about the closing: what really happens when you close the payment channel. I know in the past a lot of the complexity in the Starlight implementation was that, well, we didn't have claimable balances, and then you have all the things that can fail, and then there's no way to retry, and then you're kind of stuck. And, yeah, thinking about how to ensure that things + +[46:00] are safe even if you go beyond the one transaction, right? Because in this case you have this one transaction; I think it probably works if nothing fails, if everything is claimed via claimable balances, but we'll know as we actually go a little further. Yeah, I mean, the major pain point was that paying out to the responder could actually fail.
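A concrete aside on why claimable balances help with closing: `Operation.payment` can fail at apply time for recipient-side reasons (for example, a missing trustline), while `createClaimableBalance` does not depend on the recipient's state, though it can still fail if the source is underfunded. A hedged sketch with the JavaScript SDK; all names and amounts are placeholders:

```js
// Sketch: a channel-close transaction that pays the responder via a
// claimable balance rather than a direct payment, so recipient-side
// conditions cannot make the close fail.
const {
  TransactionBuilder,
  Operation,
  Claimant,
  Asset,
  Networks,
  BASE_FEE,
} = require("stellar-sdk");

const closeTx = new TransactionBuilder(channelAccount /* placeholder */, {
  fee: BASE_FEE,
  networkPassphrase: Networks.TESTNET,
})
  .addOperation(
    Operation.createClaimableBalance({
      asset: Asset.native(),
      amount: "250", // placeholder: final balance owed to the responder
      claimants: [new Claimant(RESPONDER_ID, Claimant.predicateUnconditional())],
    }),
  )
  .setTimeout(0)
  .build();
```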
And so you had to make sure that it was a separate transaction from restoring the account. And now claimable balances can't fail; it's fantastic, right? You can do everything in just one transaction, and that fixes the thing. Yeah, what I'm saying is more about the limit: there's a limit of 100 operations in one transaction. So are there cases where you have to go beyond that? And if so, is it just a matter of: right now you do it two times, maybe it's three times; is that all there is to it? I don't know, say, a payment channel with 100 assets in it or something + +[47:00] that would be interesting. I think we actually could do... well, yeah, or it depends: if you do multi-party, you know, more than two parties, all this stuff, right? Yeah. And I actually think all that will work, but if you'd like, I could add a section on a multi-party, multi-asset channel that needs to span more than one closing transaction. Yeah, I mean, there's really no reason you can't just have multiple closing transactions with successive sequence numbers, as long as they never fail, right? That's kind of the thing. No, they can fail, actually. Because it still consumes a sequence number, they can't be invalid, but they can fail. Right, so if it turns out you'd rather pay out with an actual payment instead of a create claimable balance, you can still do that; it's just that now you have to consider the fact that the transaction could fail. Therefore you can't pay out to multiple users in the + +[48:00] same transaction; you just have to have multiple closing transactions. Yeah, and then you have the whole question of what you do with the leftover at the end. The leftover at the end: the initiator just gets control of the account. Yeah, but that's one decision you have to make on that front, because the only thing we can do is basically leave full control of the leftover at the end to a given party, which I think you want anyway, because someone had to put up the base reserve and stuff for that account, so why not let them get control. It also has this other advantage: it lets the initiator unilaterally top up, right? You don't have to do some complicated protocol to top up; the initiator can just kind of throw more funds in there, and then it can either spend those or get them back automatically when it closes the channel. Leigh, are you raising your hand? + +[49:00] Yeah, I have a question. So I think we're getting into the details of the specific payment channel implementation, but I just had a thought on something you were raising, Nico, about the footgun of that min sequence age. I'm wondering: one of the early proposals (I think, David, one that you shared) attempted to use claimable balances as the relative time lock. So, as opposed to having the relative time lock in the account, it existed out in this external thing, somewhere else in the ledger. And I think that has some undesirable properties; we wouldn't want to do that with claimable balances.
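For what Leigh is describing, claimable balances do already carry relative-time predicates, so a time lock can live on a ledger entry outside the account today. A minimal sketch (the beneficiary and duration are placeholders; as Leigh notes, this approach has drawbacks, so treat it as an illustration of the idea rather than a recommendation):

```js
// Sketch: a claimable balance that only becomes claimable ~30 days after
// it is created, i.e., a relative time lock recorded on the ledger entry
// itself rather than on the account.
const { Operation, Claimant, Asset } = require("stellar-sdk");

const afterThirtyDays = Claimant.predicateNot(
  Claimant.predicateBeforeRelativeTime((30 * 24 * 60 * 60).toString()),
);

const lockOp = Operation.createClaimableBalance({
  asset: Asset.native(),
  amount: "100", // placeholder
  claimants: [new Claimant(BENEFICIARY_ID, afterThirtyDays)],
});
```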
But is it worth exploring that idea of having relative time locks be a very simple, small thing on the ledger? They'd get created outside of the account, and we could define things like when they expire, and therefore which transactions... Yeah. So actually, I + +[50:00] think we could still make payment channels work if we got rid of the min sequence number, but it would actually have more footguns. I actually think the min sequence number is there to reduce footguns and to make a lot of things simpler, by making sequence numbers less brittle. I'm also interested in Leigh's question, though; it's something I've been wondering this whole conversation: are there things we can do beyond changing the account state that might be more powerful than just changing the underlying account state, like recording some other state, or transmuting claimable balances on a transaction? Exactly; I was actually referring to the sequence age. I was meaning: move that out of the account into, like, you create a lock, and that lock is automatically locked for a specific age, or something like that + +[51:00] okay. So I can tell you a couple things you could do, but they're all going to be more complicated. One thing you could do is somehow guarantee that claiming a claimable balance will never be invalid and never fail, right? Then you could use claimable balances as a kind of synchronization primitive and be guaranteed that you're not accidentally going to burn a sequence number. Another thing you could do is add a replay cache to each account. So you could have a new kind of transaction that, instead of altering the sequence number, adds itself to the replay cache, for which you have to pay, you know, a base reserve. And then transactions would get garbage-collected from the replay cache based on their max time, or maybe based on bumping the sequence number. This starts getting complicated, because you don't want a single transaction to + +[52:00] be able to delete a million things from the database, because then that would be an expensive transaction. So you'd have to limit the per-account number of entries in the replay cache, or somehow ensure that they can't all be deleted at once, or have some other operation that deletes only up to 100 stale things from your replay cache. So that, again, would simplify things, because again you could kind of just submit transactions. It would be a little bit more general and useful than this, because now you wouldn't even have to worry about the relative sequence of these transactions: you could have two transactions on the same source account that could be submitted in either order, and they would both be allowed to execute. So I'm happy to cook up a proposal along these lines of a replay cache, and I think it would have some nice usability properties. I think it would be much harder to implement, and I think it would end up having some annoying warts owing to the + +[53:00] fact that we want to bound the amount of work that any single transaction can cause validators to do. So there are either going to be limits on the replay cache, or limits to how quickly it gets garbage-collected. Leigh, do you have more to say? I see you're still unmuted. Okay; on the replay topic, I have tons of ideas about this, David.
If you want to talk about it, things I want to talk about in this context: I actually drafted a short proposal somewhere about this, that you could do this in a very no-storage kind of way on the ledger and still have no undesirable replay things. We could talk about it offline if you want to. Okay; was your proposal good enough that we should consider it instead of min sequence number, or...? No, it's kind of orthogonal to min sequence number; it + +[54:00] avoids other kinds of problems, but it doesn't fix those problems. But it does allow you to submit transactions without changing the sequence number? It does. Okay, yeah, let's talk offline about that. Cool. And I think we're pretty much out of time. I don't know if there's anything anyone wants to say at the end here, but it sounds like there are some pretty clear next steps. Any final parting words, parting shots, clever jokes? Awesome, everyone. Well, thank you for coming, and to everyone who's watching, thank you for watching. This was a nice, deep technical dive into [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md), and more to come on there. If you want to know what's going on, you can always join the Stellar dev mailing list, and you can also look at the GitHub repo and the actual drafts for these kinds of proposals. I appreciate everyone here for joining in the conversation; thanks so much.
diff --git a/meetings/2021-04-15.mdx b/meetings/2021-04-15.mdx new file mode 100644 index 0000000000..dc68d386e1 --- /dev/null +++ b/meetings/2021-04-15.mdx @@ -0,0 +1,195 @@ +--- +title: "NFTs on Stellar" +description: "This community roundtable explored how NFTs can be issued and used on Stellar, covering digital ownership beyond art, creator monetization, decentralized marketplaces, and emerging real-world use cases such as credentials, gaming assets, and natural capital." +authors: + - fred-rezeau + - kalepail + - steve-walker + - zachary-freundt +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session brought together ecosystem builders, creators, and SDF contributors to discuss what NFTs mean in practice on Stellar, how they differ from hype-driven narratives, and where durable value is emerging. + +### Key Topics + +- What NFTs represent beyond the acronym: + - Shift from “unique pixels” to broader digital ownership and certification. + - NFTs as proof of provenance, ownership, and history rather than just media files. + - Acceptance of limited editions and non-fractionable assets as valid NFT use cases. +- Creator and ecosystem empowerment: + - NFTs as tools for artists, musicians, and game developers to bypass centralized platforms. + - Direct monetization without app stores, streaming platforms, or heavy intermediaries. + - Long-term sustainability for creators outside touring- or platform-dependent models. +- NFTs on Stellar specifically: + - Asset issuance is fast, affordable, and environmentally lightweight. + - Use of the Stellar DEX for discovery, bidding, auctions, and settlement. + - Leveraging timebounds, path payments, and trustline controls for NFT workflows. +- Platforms and implementations: + - Litemint’s NFT and collectibles marketplace design choices. + - Use of Stellar asset metadata standards rather than prescribing storage layers. + - Cross-network considerations, including easing Ethereum NFT fees via Stellar rails. +- Beyond art: real-world and functional NFTs: + - Credentials (degrees, certifications, badges like Stellar Quest). + - Gaming items and in-game assets. + - Concepts for real estate, co-ownership, and fractional investment. + - Early exploration of natural capital and impact-driven NFTs (e.g., forestry). +- Open challenges and open questions: + - Fractional vs non-fractional asset handling. + - Legal rights, licensing, and copyright vs on-chain ownership. + - Storage permanence, decentralization, and viewer dependency. + - GDPR and personal data considerations on immutable ledgers. +- Takeaways: + - NFTs are best understood as programmable ownership and certification tools. + - Real adoption comes from strong use cases, not speculation. + - Stellar’s flexibility enables experimentation without dictating outcomes. + - Standards and ecosystem coordination will be critical as projects mature. + +### Resources + +- [Litemint](https://litemint.com) + +
+ Video Transcript + +[00:00] All right, all right. Here we are: finally, the Stellar roundtable on + +[01:00] NFTs. We are going to talk about non-fungible tokens today. I will admit, it wasn't that long ago that I thought this had something to do with mushrooms or fungus; I didn't know what a fungible or non-fungible token was. That was definitely one of my earliest Google queries back when I was first getting introduced to this concept. We've got a fun little discussion for the next hour or so, and we've got some fantastic guests on. I'll start with myself. My name is Tyler van der Hoeven. I'm the ecosystem evangelist at the Stellar Development Foundation. I spend my days writing documentation, doing workshops, building apps, and understanding what the ecosystem, what folks like these panelists here, are up to, so I think I have the best job, but I could be wrong. + +[02:00] If you all can go ahead and make sure you are unmuted, we'll go around this proverbial round table. Steve, maybe you want to go first: a little bit about yourself, how you got into Stellar, and what you're up to. Yeah, absolutely, cheers. My name's Steve. I'm the CEO at Task. We got involved with Stellar through the community fund; we've been working with Stellar for two and a half years. Task is a system that allows organizations to connect and engage with their teams, gather information, provide rewards and incentives to teams, and then take all that information and report back to investors and donors and the market in general. That's predominantly what we do. We've got some interesting aspects of how we believe NFTs can be used, which I'm looking forward to getting into later. And that's all from us, that is me. Super fantastic, really + +[03:00] excited to have you on, particularly for that less-than-traditional use case that we might think about when we use that word NFT. Fred, how about you? Yeah, hi everyone, and thanks for having me. Also, I wanted to say annyeonghaseyo to everyone in Korea still up and watching at this time. My name is Frédéric. I am the founder of Litemint; I'm also the owner of teleport. Litemint is a gaming platform, basically connecting gamers to the crypto world with game asset ownership, cross-currency, decentralized in-app purchases, and a lot of other features that leverage the Stellar blockchain. Just recently (it was live with Sam Conner on Monday) we unveiled our NFT and collectibles marketplace on Stellar, and the marketplace basically was created to + +[04:00] connect the dots between all our products to provide more synergies, and it will accept any kind of NFTs. It will be available publicly within the next few days; we are still fixing a few things, and, yeah, I will talk about it a bit more later. Super, yeah, very excited to have you on board. You've been around for a long time in the NFT space; super excited about that NFT platform, and I can't wait to start playing on that. Zach, how about yourself? Stellar Zach? Hello, hi, my name is Zach. I am the director of marketing at the Stellar Development Foundation, which may make you wonder why I am here to talk about NFTs on an NFT roundtable.
My actual introduction to the space was blockchain collectibles, pre-2018, like 2017, 2016 kind of stuff, and it's been interesting to watch the evolution of that kind of thing and the evolution of digital ownership. I've got a lot of thoughts, just + +[05:00] generally, having explored this before my time at SDF, and, yeah, I'm excited to hear how you guys are implementing this stuff into your current solutions or use cases. Super, yeah. Zach and I have a fun little history: before either of us worked at SDF, we were doing some stuff around Colorglyph; that's how we were introduced. Not exactly NFTs, but still the idea of crypto gaming items and memeing the internet with strange and wonderful digital items. So for our first discussion topic, I think it's good to lay a groundwork, kind of create a foundation around definitions. We're going to use the word NFT a lot. It's an acronym, but the acronym has maybe grown a bit beyond just that, or NFTs have grown a bit beyond that. Fred, I remember when you were talking with Sam Conner a couple days ago, you touched on this a bit: + +[06:00] that NFT is an acronym, but it's grown beyond that a little bit. So if you can briefly touch on: when somebody says NFT, or when you think about it, what does that mean to you, what does that mean to your users, and how does that affect the way that you build Litemint or products supporting NFTs? Yeah. So NFTs: yes, by the strict definition, they are unique assets, and that means they are not interchangeable. But the way people use the term NFT these days is more to describe a wider range of non-fractionable assets. They are not necessarily unique, and this definition fits much better the current use case of NFTs, which is essentially to provide proof of ownership for digital content within a decentralized world. So, yeah, I think the more decentralized and the more disintermediated our + +[07:00] digital platforms become, the more NFTs are meaningful. Yeah. I remember, because NFT for me obviously meant that acronym, but then I saw an auction or a sale or something that was a one-of-10 NFT sale, and I'm like, I feel like we're moving beyond what NFTs actually are. There's nothing wrong with a one-of-10 sale of something, but that's not what the acronym is. So maybe it's starting to move beyond that, I don't know. Zach, maybe you have some thoughts around this concept of moving away from just an acronym to taking on more of an ideological ownership freedom there. I mean, I'll echo Fred: 100%, it's digital ownership stuff. Early on, outside of colored coins and stuff, moving into early Ethereum collectibles, all you had to work with were ERC-20-like contracts, + +[08:00] and the stuff we were working on were modified ERC-20 contracts that tracked ownership, but as single, no-decimal things. So we were thinking about things like trading cards, or rare items and drops in video games, something like that. Those things aren't unique, in that you'll have multiple Mickey Mantle cards in a baseball card pack, but there are only so many that exist. And so the acronym doesn't bother me, because before there were NFTs, that concept existed anyway.
And so, if we're calling them NFTs now as the broader space term, I don't think it matters if it's a one-of-one-thousand, because the principle is the same: it's not like a currency or something else. Yeah, yep. That digital ownership is what I think is so interesting, and that being what people mean when + +[09:00] they're talking about NFTs: taking whatever we used to mean by physical ownership of something and finding a way to turn that into digital ownership, where you can transfer it and gain a lot of that same value, where I'm holding the one thing, or there are only ten people in the world that have this. Somehow you can replicate that digitally, where you're not physically holding an item, but that same construct exists. Steve, did you have something to add there? I agree with Zach, in that, for me, this allows generation of limited editions, which really is the kind of one-of-10 or one-of-a-thousand. The lovely thing about NFTs is that it's empowering artists again; you know, art, from pictures to music and all sorts of creativity. I think when you combine NFTs with smart contracts, we're going to see a wave back. When digitization came in, you could copy and paste everything; we saw file sharing. If you look at the music industry, + +[10:00] the only musicians you can see now are typically huge artists, because you make all your money going on the road. I think with NFTs and smart contracts, and the way in which you can start to reward people, we can go back to the old kind of cottage industry: someone's in their bedroom, they're pumping out great tunes, they're making great art, and there's a way of them having a revenue stream which respects them as an artist, and they don't need to be going on the road. I think that will be really lovely to see, because it's going back to the value of art before digitization kind of took that away. I would even take it a step further and say the art stuff is really cool from the perspective that the artists are basically the guinea pigs in this NFT experiment. They're out there putting these pieces out, and they don't know, long term, how the format's going to treat them or what they're going to do, but they're testing the medium, and they're breaking ground for, I think, things we talk about. There are projects in our ecosystem + +[11:00] already that are thinking about fractionalizing things like real estate. But if you're just a normal person, and you're buying a home, you currently have to go through something like a title company and multiple parties. And there's an evolution in the future where, the way an artist could create a beautiful piece, it could basically be the digital representation of the title for a home. So you could have other things represented as NFTs, like a home title or something like that. It ends up circumventing processes that we currently have to run through a title company, because the source of truth is who owns the house, who holds the title to the house, and the NFT concept applies very nicely to that. And I think the artists are breaking the ground here.
But digital ownership doesn't stop just with art, and I think that's Fred's take: + +[12:00] yeah, I think you guys are obviously pushing beyond just the art stuff here as well. Yeah, I mean, to me, one of the main use cases of NFTs, basically being able to address digital goods needs, is the fact that if you look at the industry today, you will see, for example, in the music industry, Spotify is a platform that controls most of the paycheck of all those artists, and we have the same in the gaming industry with the App Store and the Play Store. With the availability of those items outside of those platforms, off the centralized platforms, on decentralized networks, then we can really break out from this model. I just wanted to add: as you break out from that model, as you look at things like land ownership, I'm very interested in that aspect of actually taking the digital certificate and having it map onto physical objects. I think there's a lot of potential there. But when people say, well, what's the point? + +[13:00] A really easy way of summarizing it is to look at something like fake degrees. Any of us can go out on the internet and grab a fake degree of whatever, and it's not the biggest problem we have in the world. But in terms of fixing that problem with an NFT, which is simply tied to someone's domain, with a toml file, with an asset that's being issued: now you've got your NFT certificate. That toml file can be at Harvard or whatever the education institute is; you've got a two-second check. And the other bit that I think can become interesting is that it can become automated: background checks just get automated, and no one needs to actually go and have a look at anything. And you could extend that further into a personality test. So if I want to apply for a job, I complete some personality tests, and I'm + +[14:00] quite happy to open that up for someone to look at. So I'm sending NFTs which say: here's proof of my degree, here are some personality insights, to see if I'm right for the team. There are various other digital aspects of my personality I could be putting forward, and then you almost start to get rid of HR and recruiting departments, as that automates loads of systems there. It's not the biggest problem in the world, but I think there are interesting models of using NFTs in that type of way. Yeah, that's something that's been interesting even on Stellar Quest, because we have these different challenges that people do, and they earn badges. You don't get those badges unless you've completed the challenges, and so having those badges in a wallet that you can verifiably prove ownership over is a stamp of: I've gone through Stellar Quest series one or series two, and that means I know something about Stellar, because I can't + +[15:00] get these badges any other way. That's really neat. And so the NFT is a vehicle for trust, a vehicle of assurance: an employer can say, okay, we need somebody who knows Stellar, I hope they do, and, oh look: as long as you can prove ownership over this account that you say you own, I can have some level of assurance.
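As an aside for readers: the "verifiable badge" check described here is straightforward to do against Horizon. A hedged sketch follows; the badge code and issuer are placeholders, not the real Stellar Quest issuer.

```js
// Sketch: does this account hold a given badge asset?
const { Server } = require("stellar-sdk");

const horizon = new Server("https://horizon.stellar.org");

async function hasBadge(accountId, badgeCode, badgeIssuer) {
  const account = await horizon.loadAccount(accountId);
  return account.balances.some(
    (b) =>
      b.asset_code === badgeCode &&
      b.asset_issuer === badgeIssuer &&
      Number(b.balance) > 0,
  );
}

// Usage (placeholder values):
// await hasBadge("G...HOLDER", "SQ0101", "G...ISSUER");
```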
And seeing that model move out, where NFTs are a vehicle rather than only a piece of art, where the thing you own is not just the item itself but more a vehicle toward something else, is kind of cool. So that was our initial introduction to what NFTs are and how they're being used a bit in the real world today, and I think we've already touched on it. But if we could transition slightly: there's so much hype and buzz and noise around NFTs. If we boil some of that away, for you all personally, what do you see as the value of NFTs + +[16:00] for the spheres and the realms you're in? We've already started touching a little on how those begin to show themselves in product use cases and the things that you all are building. But if you can touch on those a bit more in depth: if you scrape away the noise, what do you think will be left of NFTs in five or six years? Maybe, Steve, if you want to take that one first. Well, in five, six years, we will definitely be collecting NFTs, because, as humans, we just love collecting stuff. So the collectible aspect, getting away from the ridiculousness of buying pixels for millions of dollars: there's going to be a steady state of very happy adults and school kids collecting lots of interesting stuff, because we just love doing it. So that's great. There's + +[17:00] the certification aspect, which I personally find interesting. I think it can solve a lot of problems if you expand that certification concept in terms of who is doing what, which can be at an individual level or an organizational level. If you look at Stellar, or any blockchain similar to Stellar, in that mission of getting financial fluidity globally: that's one problem you solve. The next problem is: is it going to the right person? One of the key issues that we have is finances being misdirected. I think if you combine that with certification, you can start to direct finances in the right way, because you can start to gate them on particular certifications and decide that's going to be the way you send finances through. So I think reputation, building reputation, + +[18:00] certification, can work in all sorts of ways that tie in and automate financial distribution. The other bit that I hope will be something we see in four, five, six years, and it's certainly an area that I'm interested in personally, is that if we look at the key issues we face globally, the incentive schemes don't work. I remember a friend talking to me a couple of years ago about natural assets: how can you take natural assets and convert them into investment propositions? If we accept that human greed is typically what drives things, then could you harness natural assets in a way that turns them into investment vehicles, where they give financial ROI and there's then a desire to protect, whether it's + +[19:00] reforestation, whatever it happens to be? Now, if you take the NFT and you can use it to track physical assets, natural assets, you can then have investments into, say, forestry.
There's the potential that, as you purchase younger trees, if there's evidence that they're growing, that asset goes up in value naturally over time, and if you're holding it as an NFT which is tradable, you have a natural asset which people can be trading. And I think it's an idea where, if you get the economics right, you could go out and start to solve some of these problems, because a lot of the problems we have exist just because the incentive schemes are totally wrong. So I'm hoping that there's potential there, long term, for NFTs in terms of natural asset investment. Yeah, fantastic; I echo a lot of those sentiments personally myself. Fred, how about you? + +[20:00] Yeah. So, I mean, there's a saying that there's no such thing as bad publicity, right? These stunts are not the ultimate use case for NFTs, but we can't stop the excitement; that's part of human nature. And NFTs have a much wider appeal to the mainstream than cryptocurrencies, for example. So I think all this is part of it. We need to be careful not to engage in too much destructive, reckless behavior, but I think there's also a good side to this kind of hype and excitement around NFTs. So, yeah, definitely not everyone is going to sell "The First 5000 Days" for $69 million, and that's definitely not the use case. The way I see NFTs contributing to a better world is essentially being able to power a decentralized world. That's what + +[21:00] I was saying just before: the ability to disengage completely from those platforms. Because currently, let's say you are a game developer: the only opportunity for you to sell NFTs or sell in-app purchases is to go on the App Store, on Steam, on all those centralized places, which somehow impose their rules, and we saw, for example, with games like Fortnite, what happens when the store suddenly decides to put you out. And the same for the music industry. So basically that's the way I see NFTs: powering a decentralized world of tomorrow. Yeah. Zach? I mean, I was just going to take Fred's thing and what Steve said, and you said "not hype," but I feel like this is going to come off feeling a little bit like hype. So in the current situation, obviously, with all this art trade stuff, there's a really + +[22:00] unique thing happening where you're seeing boutique shops open up that are like labels, if you look at the music industry or the art world, or like agents, where they are going out and seeking people and trying to set them up with an NFT strategy for releasing their stuff. And that's going to go away at some point. There will always be boutique stuff, but right now that's where the opportunity is. As people figure out the strategies for this themselves, it will grow. And what I was writing about a lot back in 2016 and 2017 was: where are we going to go as far as working in the future? I wasn't expecting pandemics and stuff, but everybody has now been introduced to the virtual internet, because everybody was at home for a year, and so offices may not necessarily be the same kind of thing. You can have virtual workspaces; you can have this weird match between virtual reality and augmented reality.
You still need to have traditional things; people will still live in houses, people will + +[23:00] still own cars, and there will still be things that need to be bridged from the traditional world into this world. But going back to Fortnite: yesterday, I think, Epic announced that they had gotten a billion dollars in funding for something called the metaverse, which is going to be like brand Disney World, I guess, VR brand Disney World; it'll be like Ready Player One. I see those kinds of environments growing. Second Life is a great early experiment for virtual real estate, all this stuff. To Fred's point about a decentralized world: that will come. It may not come in the next year, and it may not come out of this immediate cycle, but people are definitely thinking about how to get to that point and what that means. Am I going to be here in 25 years? Is this a five-year thing, a 25-year thing? How long will it take for adoption, and what steps are required to make it easier? I think there are just infinite applications; it just depends on how + +[24:00] digital we decide to get as a people. Yeah, just to comment on that, and this is going off-piste a little bit, so pull me back if it's going too far. When you look at collaborative currencies which complement a traditional currency, so you have communities that can use a currency between each other, then again, in my brain, I always see the NFT with a smart contract, connected with some sort of inputs, and the outputs can be certificates in the form of NFTs. And if you look at a collaborative currency where communities are working together, and you're looking at reputation and understanding trust between the different stakeholders within the community, then I think NFTs can come in there. If you look at that kind of absolutely digital, decentralized future, then how do you + +[25:00] automate reputation and understanding of the different players within that community? I have another thing that kind of goes off of that, which I've been chewing on recently, and that's the legality of NFT stuff and ownership. When I'm purchasing an NFT now, what does that actually guarantee me as far as rights? Platform developers have to consider copyright and cease-and-desists, and there will be tools to hide things from platforms. Platforms that allow you to mint through them will obviously have terms of service that say, hey, you can't do these things, but because of how these assets are represented on chain, just removing them from a platform doesn't remove them from existence. And so I'm wondering if there will be a big, giant lawsuit one day where you have all these collectors basically suing for rights to use art that they've purchased, or vice versa, and what those purchases actually guarantee. And I'm curious if, + +[26:00] like, there's a whole industry for terms of service that may need to come out of this. I suspect there will be a court case that resolves it: if you put the terms of service on your NFT itself when you mint it, then that will be the legal grounds; but anything done without that, I feel, is going to have a bad time. But I don't know; what do you guys think? What does owning an NFT guarantee you? Fred?
I think, for Steve or Tyler, that's definitely something that, for example, the Ethereum world has started to work on. You can see Dapper Labs, I think, started to specify a license called the NFT License; you can read it on nftlicense.org, I think, and it specifies a lot of things about this. But, yeah, that's definitely an area that NFTs alone cannot address: they address ownership; they don't address copyright, and you can obviously have a lot of... They're + +[27:00] difficult. To address a question to the group: what's the situation with GDPR, blockchain, and NFTs? Because if you're, let's say, a blockchain provider, and someone decides to put personal identification information, which on Stellar would be in the memo field, what happens? Where's the blame? Where's the right to be forgotten exercised? That must have come up in conversation; I'm just intrigued to know. Well, Fred, do you want to tackle it? I'm not an expert on that. Yeah; so I'm not a legal counsel or anything like that, so I don't know if I necessarily have the greatest answer for you. But something we maintain through a service, maybe something like Mailchimp or Emma or whatever, some digital database of things, is easy to go into and remove, because there's personal information identified with the + +[28:00] service, and the service has those tools. But with a distributed network like Bitcoin or Ethereum, or even Stellar, what's on the network... part of using it is that you understand that's what happens. It's immutable, right? You can't undo this stuff. So I don't know. I feel like that's probably a whole different conversation, what happens when personal data is on the blockchain, with KYC and AML stuff to roll into. It's a big question; I don't have an answer. Yeah, for sure. I mean, so much of the liability there is on the entity that put that data there. Because the assumption, the way these laws have been written, is that if you put it there, you can also take it off, and so the laws revolve around: take it off, remove it, delete it. But when that assumption is no longer true, you're going to need different rules, and I don't know what those are going + +[29:00] to be, other than that I think it's probably going to boil down to: you need to be really careful what you put on a blockchain, and there's liability for you as someone having access to the blockchain. It's less around tools like Stellar, which aren't designed for arbitrary data storage; you can put arbitrary data on there, but it's not designed for that. My concern is more around things like IPFS and Filecoin and Arweave, these networks that are specifically designed for arbitrary data storage. That becomes a bigger issue when your purpose is to store arbitrary data, because the assumption will be: I'm going to use this instead of my database. But if you don't understand that what you're putting on there is immutable, you might end up in trouble. Yeah. You know what it leads me to believe, though, and this is based on no precedent or court cases, I can't pull anything up, I'm just pulling this out of thin air: with the internet, + +[30:00] everybody's just using browsers to interpret things.
You can have a hardcore user that's doing it through... I don't know, how David uses the internet, I think, is the best example. But that's going to be very specialized, and people are aware that there's a darknet as well, or you can use onion routers, whatever, but that's not the primary use. So my assumption would be: just like any platform, if the primary way to display this information... An Ethereum NFT is a great example, because it's just a smart contract; there's not an image associated with it. You have to interpret it through whatever thing, something like IPFS, so you're viewing it in a viewer or something like that. Most people, if they can't see it or they can't view it, they're never going to see it. So I feel like that's good enough, as long as it covers the majority. I mean, I + +[31:00] don't think that's going to settle a court case, saying, oh, that's good enough, they can't see it, but I think that's kind of how the internet's treated currently: what, as a user on the internet, acceptable use is, to some degree. Maybe that's a wrong interpretation. Yeah, it'll be interesting, for sure, to see, and that, to me, is one of the biggest outstanding questions. I wonder... so, for example, the NFT License that has been specified by Dapper Labs: they say that you can take an NFT onto a marketplace as long as you can prove that the ownership is proper before putting it on the marketplace. So I think marketplaces will have to do a lot of things in order to avoid being exposed to all sorts of things + +[32:00] like that. When you look at the future, going back to what Zach said about that digitized future, whenever it happens: of course, the lovely thing is when a system is invented, which is what we have with blockchain, where it's released and it can't be stopped. With NFTs, I suppose there's that same question. And, Tyler, you were talking about true decentralization of NFTs the other day: how do you get to that state? Your description was: you're looking through a window; you have to go to the shop, look through the window, and look at your NFT. How do we decentralize that? The second you decentralize it, then the laws have to wrap around the NFTs, and not the other way around. Could you go into the opportunity for decentralization and + +[33:00] creating that sort of system? Yeah. So the way that almost all, if not all, NFT marketplaces work is that there is an interface that you're accessing your NFTs through, and somewhere you can take your NFTs and view them on some other blockchain explorer or some other interface. But there tends to be this "I can only get at this through a browser, through a particular website," and particularly when you split the NFT between the contract that allows you to sell it and the actual representation of that asset, the IPFS address or whatever it might be that actually holds the image or the art, you're beginning to centralize.
You're at least beginning to silo the usability of that thing to where, yeah, if your NFT is inside Ethereum, you have to look through a window, + +[34:00] OpenSea, or look through Foundation, to see your NFT. But if they decide to pull the shade on that, and you view that URL and they've decided that NFT is no good anymore, you've essentially lost access to it. And so, by decentralizing that, what you're doing is: you kind of have to take the reason that they might pull the shade and address it before you ever mint, and almost put the viewing mechanics, the access mechanics, and bake those into the NFT itself. Whether that's the code to view the NFT along with the actual art for the NFT: all of the pieces for how this thing is represented and accessed. So not just the security, and not just the trading of the thing, but the actual access to the thing, the way that you hold it and identify to those around you ("look at this thing that I own," whatever that mechanic is), that has + +[35:00] to be part of the NFT too, so that access to it is just as central as the thing itself, so that you're able to show it, and when you show it, it's also still there. It's so strange that these things are split, because if I hold a baseball card, all of that is contained in that baseball card. But when we've transferred that digitally, we've split these things up and siloed them to where each one can fail individually. And now you're holding up a baseball card, but there's no image on it; or you're holding an image in your brain, like, no, I can see it in my head, but nobody else can access that picture by having the card that you're showing them. It's weird to think about, but that's the world that we've created. When we've siloed all of these things, we have to find ways to bring them back together, in my mind; or else, invariably, given enough time, things are going to go away, and all of the things that we're purchasing and accessing now are going to be so broken + +[36:00] that they won't have the value that they have now, and there's going to be a lot of this sort of hype that goes way up, none of it's going to work, and it's going to leave that bad taste in everybody's mouth, and we're going to have to slowly crawl back up before we're able to start innovating again. How much does that fit with... does Stellar want to fix that problem, or does it conflict too much with being fast? So there's obviously a big difference between the Stellar Development Foundation and how we use Stellar, but Stellar itself is an open protocol that anyone can use for their purposes. So if you approach Stellar and you think it can work for your purposes, go for it. I don't think it's something that SDF is going to work on: we're about cross-border payments, about open access to the world's global financial system; it's a different goal. I think there are some crossovers, particularly when you start to define what value is, or what financial access actually means, what is + +[37:00] finance. Those things, I think, begin to be answered differently when it's digital. But when you look at Stellar itself, the protocol, the network, how changes are made to the ledger, the cryptography behind it: that works great for all kinds of use cases outside of this.
There are certainly ones that don't fit, but if you approach the network and see that it works well for your use case, there's nobody standing in line saying, "sorry, it can't do that." And I think, even if SDF is not internally developing NFT standards, we're having this conversation because some of you guys are, and I think it would be worthwhile to see a conformed standard or two for this across the platform. We've been talking about it since I started at SDF, and I've seen Fred's + +[38:00] ideas for how he's doing it; I've seen other people working on projects; there are a couple of other marketplaces that have been launched simultaneously. It's been interesting to see how the conversations that have happened over three, four years are now starting to land; that work is now dropping, and it's really cool that it's dropping simultaneous to the hype cycle, because the work that Fred, for example, has done is not new. It's not like he just spun this up because suddenly people sold stuff; it's something that he's been doing for a long time, and the same goes for some of these other projects that are also issuing NFTs on Stellar. So I think a standard would be great, an ecosystem standard, but it's going to require use cases, which I guess is my other point. I think a big thing we're seeing is that a lot of people want to issue NFTs on Stellar because NFTs are a big deal. But if you look at the space right now, you'll see other networks (and I won't name or bash any of them, because I don't think there's anything wrong with it) saying: you can issue stuff on ours; + +[39:00] here's the platform to do it. People will try that stuff out. But the NFT collectors, the ones people are thinking of right now, are all on Ethereum, and I think the way that you really get people to use NFTs is through use cases. Litemint is a great example: you make the game; the users just want to play the game; the NFT is part of that experience. They can learn about how that functions as they learn that community and environment and ecosystem and platform. Actual use cases will bring people to networks, and they'll get NFTs through those use cases. Just issuing an NFT on Stellar because you want to do an NFT but you don't want to pay 70 dollars in gas on Ethereum: that makes logical sense, but it's not going to attract the current insanity that you're seeing on Ethereum around it. Does that make sense? Yeah. I mean, I wanted to get back to the + +[40:00] storage question for NFTs. The way I chose to approach this problem with Litemint is basically by not addressing this problem. Essentially, I use what Steve was pointing out: the SEP-1 specification, the Stellar Ecosystem Proposal standard that allows us to describe an asset through a toml file, and that's the only thing that the Litemint marketplace will basically care about. So whether the item is stored on IPFS, or stored as data entries on the asset's issuing account, is not something that the marketplace will address. Basically, as long as your item is described properly within the domain's toml file, it will be displayed properly to users on the marketplace.
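For readers wondering what "described properly within the domain's toml file" looks like in practice: SEP-1 lets an issuer publish a `stellar.toml` with a `[[CURRENCIES]]` entry per asset (fields like `code`, `name`, `desc`, `image`). A hedged sketch of the lookup a marketplace could do; the domain and asset code are placeholders:

```js
// Sketch: stay storage-agnostic, as Fred describes, by rendering whatever
// metadata the issuer's SEP-1 stellar.toml declares for the asset.
const { StellarTomlResolver } = require("stellar-sdk");

async function describeNft(homeDomain, assetCode) {
  // Fetches https://<homeDomain>/.well-known/stellar.toml and parses it.
  const toml = await StellarTomlResolver.resolve(homeDomain);
  const entry = (toml.CURRENCIES || []).find((c) => c.code === assetCode);
  return entry
    ? { name: entry.name, description: entry.desc, image: entry.image }
    : null;
}

// Usage (placeholder values):
// await describeNft("example-issuer.com", "MYNFT01");
```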
And I decided to use that because, at the end of the day, it's currently the standard used on Stellar for describing assets. And this way, I don't make + +[41:00] assumptions about whatever standard comes later on for storing the data. Because there's also another thing that I was wondering: yes, it's nice to have access to the art in a kind of permanent way, but is that really where the value is? Because if you look at, for example, the way "The First 5000 Days" was sold for $69 million: it doesn't really matter that someone can actually get the high-resolution copy of that art on IPFS; that's not what carries the value of the item. What carries the value is the fact that someone has the signature to sign a transfer transaction, to pass ownership of that art. And even if tomorrow the only trace remaining of that art is a five-by-five-pixel version of it, I think the value will still be there, because someone has the ability to sign the + +[42:00] transaction. I would agree with you on that point to a large degree. I think newcomers to the space want the art, and so when people pull things like changing the art on OpenSea, that becomes a pseudo news article, because people are like, well, it's gone. But if you listen to the people who were doing art with Bitcoin, colored coins or Rare Pepes, or the early Ethereum-style NFT things that have been converted, there are arguments about whether you're purchasing the ledger history, the signature, the ownership here. If you can track the origin of the first press, then it goes back all the way. So even if it started on one chain and ended up somewhere else, as long as you can trace all that back... I think there was an article Vitalik published recently around a similar idea: that's what you're purchasing. And I think, to Fred's point about art: people are minting these + +[43:00] pieces as art currently, but there's no reason that piece can't be, like, a legendary sword in somebody else's game, and in that regard, that asset is going to be represented differently. It exists on chain, so if you hold it in your wallet, it's proof of your ownership, and these things can be pulled from whatever you're using, whether that's Albedo or MetaMask, however the game interacts with these assets. So just because it was minted as art, and there's an IPFS file that goes along with it, doesn't mean that every application of that asset has to be art-related. I agree 100%, and I think that's a really important distinction to make, particularly for the future of NFTs, even in just identifying what people are actually buying, what they're actually getting, what's the legally defensible item that you're arguing over, what's the thing that's valuable. And it's interesting, because the representation... + +[44:00] traditionally, in our brains, when we think about what you're getting, it's the music, it's the picture; but because it's so often tied to the medium by which you got it, it's hard to separate those things out. But when you actually look at what you're selling: if I go and steal that image and mint it myself, outside of there being copyright issues, it's not valuable, because I'm not the artist, and you can prove that I'm not the artist.
I didn't actually make that thing. And that's what art fraud is now: you're not trying to find which one looks good; you're trying to find whether this artist was the real deal and he painted something. We need to find that one, because he actually touched it. It's less the item and more the assurance about the person behind it, the certification that it's legitimate. That's what the blockchain gives you: not the connection to the pixels or the sound waves or whatever the scratches on the hard + +[45:00] drive might be. The thing that's valuable is that origination, the certificate. And I think we need to keep that in mind, particularly because I think it's extremely interesting that art might change, that as something gets purchased, it's programmed to change in a certain way over time. And if you had this $69 million NFT but you could somehow load it up into a game, maybe it becomes this weird meme thing where, no matter what game you're in, you can load in this NFT and it gives you superpowers: that's really interesting. That's something you can't do with physical art; that would be fascinating. When you start to approach "what can we do with this medium?", that's innovation; that's something we can only do because it's digital. There are some really fascinating things that I think we need to start exploring by moving away from assuming that people are buying pixels, and instead assuming that what's true is that they're buying certification, they're buying something on the + +[46:00] blockchain; they're buying their stamp, their part of history, in the chain of whatever this thing originally was. Because it's actually quite simple when you get down to what makes something valuable: it's just what someone else is willing to pay for it. And so long as you can make that a large group or large amounts of money, the assurance there is on the blockchain, and that does work; that's already been proven. Attaching images to that is difficult, and there are legal issues around there. But if you can instead have this chain of events and transactions and assurance of ownership, particularly of origination, which you can do now, I think you've got something that you can really begin to innovate on. Cool, super. Sorry, over to you, Tyler? No, go ahead. Well, I was just going to bring up a model + +[47:00] from a conversation I had with someone the other day; it was an interesting brainstorm. It's combining smart contracts with NFTs: using an NFT to represent something physical. In this case, let's take a tree. The person I was speaking to was saying that they wanted to plant trees, and they hate these models where it's a dollar a tree, the tree gets planted, and it gets ripped up six months later. The point is, if you want to plant trees, you need them to be there five, ten years to get the benefit. So that's an asset which is developing over time; it has more value the older it gets, typically. And so you take a smart contract and tie it to an NFT; the NFT is tied to, in this case, a tree (it's probably going to be a number of trees together), and in the smart contract you say to the donor: okay, you're going to + +[48:00] invest $10 into this smart contract, and when you do that, you're going to get the NFT which represents the tree.
The smart contract, then, is where there are checks going on to look at that asset over time and put inputs into the smart contract saying, hey, the tree's still here, everything's good, it's being watered. And on that basis you've had the ten dollars going to the pool: say there was a dollar pushed out at the beginning to plant the tree; a year later a couple more dollars go out, and it's going through the community that's looking after these trees. So you can use a smart contract combined with an NFT, and if the tree goes, you can burn the NFT. You have a way of relating to and having a long term investment around natural assets. I think, you know, the way I explain it is probably not that clear, but I think there's a useful way of investors being able to put money into a smart contract, an NFT representation, and a long term view. + +[49:00] I don't know what you think, if that makes any sense at all. It's super interesting, and I think that the issue for me becomes around how you make assurances between something that's strictly digital and programmed and something that's very much not. There have to be those reward mechanisms, kind of like what you mentioned- like, what's to keep me from taking an axe and chopping down the trees so that I can plant another one and mint more tokens and get more money? You have to somehow make it monetarily worth keeping that tree alive, and I think some of the problem is there. But I also think there are opportunities in NFT spaces, or even just tokenization spaces, where you can get retail investors involved- where it's not big corporations but individuals who have a tree token- and somehow there is that ability to keep tabs on what's happening, to implement economic models that make it more worth it to keep these things as long term investments, or opportunities for foresters to + +[50:00] Ensure that they're doing the right thing, that there are monetary incentives for that. Because right now so much of that is flipped, where, as long as it's more worth it for me to do the wrong thing, I'll choose to do that more often. So we have to flip those economic models to where either there are smart contracts or legal contracts in place that disallow or disincentivize that, or, more preferably, there are economic models in place where it's more worth it for you to do the right thing, where you actually earn more money that way. And those are tricky, but I think one of the reasons they're tricky is because there are so many middlemen, and so many of these investment opportunities are locked away from little bits of money- people who aren't trying to make a million dollars off of an investment. They're trying to make a small moral decision of: I want to do some good in the world, and it doesn't matter if my one dollar makes two dollars in five years. I just want to do some good and have + +[51:00] Some assurance that the good I want to do will actually happen. I think blockchain has a lot of potential to open up opportunities for that, just by the nature of removing so many middlemen- because rather than having human contracts in place, you have computers. Again, it's math instead of morals that's addressing these things, and I think that's a powerful tool that we're only really starting to scratch the surface on, in terms of how it moves beyond just converting traditional finance to digital finance.
There are some really interesting opportunities there, but it definitely remains to be seen how successful those can be over the next few years. But innovation in that space is definitely needed, and I think it's valuable to look at that. Not to throw a loop here, because I wonder if there are some questions available, but I also had a quick question for Fred regarding his platform, which I got a sneak peek of the other day. Because I think, you know, similar to how- well, I don't know, let me just ask the question. Fred, + +[52:00] For your platform: you were sharing that you had some, like, cross-chain kind of compatibility, and it looks like, with the Litemint marketplace, you become an anchor, essentially, right? Like, you are an NFT anchor for Ethereum ones. With that- I mean, essentially, during the livestream I wanted to show that artists- let's say you had issued some assets, some NFTs, on Ethereum. So the goal was to show how you can benefit from Stellar to alleviate all the fees and then also still sell your art. You know, because what people may not realize is that Stellar actually has a lot of tools for working with NFTs. So, for example, you can use path payments in order to have not just cross-asset exchanges, but you can also have bid and offer discovery, which means that, basically, it's a very powerful tool to connect buyers and sellers. And if you are an + +[53:00] Artist on Ethereum, then Stellar will become a very valuable platform- the goal is to make it an alternative platform for those sellers in order to alleviate the fees. There are also other things that can run on the SDEX that people may not realize. For example, you can use a time-bound transaction in order to settle all the bidders and participants in an auction after a specific predetermined period, and you can also use the path payment tricks at the end of the auction to implement a reserve price and things like that. So Stellar is extremely versatile as a protocol for implementing all those, and people need to realize that it could become a very good alternative to those networks that at the moment have a big problem with speed. Do you see compatibility with other networks as well, outside of Ethereum? + +[54:00] Potentially; no, I did not investigate more. But yeah, I think the goal is really to make it like that: if you are an artist on Ethereum, then I want to make it so that you can try Stellar as an alternative and benefit. Sweet, I think that clarifies my question on that. All right, well- I don't know if anybody has any last minute thoughts; otherwise, we're going to open it up. We have a couple of questions from our engaged and interested audience which I would like to pose to the group. So maybe- this would be an interesting one- any plans- and this is kind of a little bit towards SDF, or at least the Stellar side of things- are there any plans to make non-fractionable assets an option in the TOML file? It'd be great to issue whole-coin NFTs instead of 0.0000001 NFTs. So how are you addressing that? How + +[55:00] Do you think we should address that? There's SEP work to be done here. There's also smart contract stuff that can be done here. Some of it's just display mechanics on the platform that you're displaying your NFT on, because, at the end of the day, it's a display thing. But I don't know- Fred, how are you addressing the 0.0000001 issue?
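To make the time-bound settlement trick Fred describes above concrete, here is a rough sketch using the JavaScript stellar-sdk. The Horizon endpoint is the public testnet, and the keys, asset code, amount, and timestamps are all hypothetical:

```js
// Sketch: a pre-signed settlement transaction that only becomes valid after
// the auction ends, enforced by the transaction's minimum time bound.
import { Server, TransactionBuilder, Operation, Asset, BASE_FEE, Networks, Keypair } from "stellar-sdk";

const server = new Server("https://horizon-testnet.stellar.org");
const seller = Keypair.fromSecret("S...SELLER_SECRET"); // hypothetical
const auctionEnd = Math.floor(Date.parse("2021-05-01T00:00:00Z") / 1000);

const account = await server.loadAccount(seller.publicKey());
const settlement = new TransactionBuilder(account, {
  fee: BASE_FEE,
  networkPassphrase: Networks.TESTNET,
  // Invalid before the auction closes; expires an hour after.
  timebounds: { minTime: auctionEnd, maxTime: auctionEnd + 3600 },
})
  .addOperation(
    Operation.payment({
      destination: "G...WINNING_BIDDER",           // hypothetical
      asset: new Asset("MYNFT", "G...NFT_ISSUER"), // hypothetical
      amount: "0.0000001",
    }),
  )
  .build();

settlement.sign(seller);
// The signed envelope can be distributed ahead of time; the network rejects
// it until minTime passes, so settlement can't happen early.
```

Because validity is enforced by the protocol itself, anyone can hold and submit the envelope once the period elapses- no trusted scheduler is required.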
Yeah, so this has been a hurdle for developing the marketplace because, first, it's not very friendly to users, but there are also some limitations on the DEX because of the int32 limit. So you can't easily price an item over about 214.75 of any unit. So, in order to solve that, for example, what we do is use an intermediate market. Instead of posting just one big offer, we post two: we issue an asset and price this asset. The problem is that it creates cryptic orders on the DEX. So I think there's definitely a need for addressing this + +[56:00] In order to make it clearer, to prevent the creation of those buffer assets. And, yeah- I'm not sure; my favorite way would be to go with lifting the int32 limit on the SDEX. That would be my favorite way, the most natural. And then also, in terms of being able to create non-fractionable assets, or assets fractionable at a very specific decimal place- that would be the best, for sure. Yeah, there are definitely a lot of different ways to address the issue. There are a lot of reasons that they are issues, whether it's selling issues on the DEX or just visibility issues, where it's weird to see that much fractioning. But I will say- particularly because we have functionality around auth-required flags- you could issue one of something, and then, if you built a smart contract through either the Stellar Turrets + +[57:00] network or even just a centralized service that allows people to buy and sell that asset, you would basically allow the sale of the NFT only through this one, like, endpoint, where it opened up authorization to whoever was purchasing it, sold that asset in an atomic swap with the operations, and then closed access. So you can absolutely do this already. It's just not incredibly natural, but it can absolutely work. I'm particularly interested in the atomic swap stuff, because that removes so much of the math issue- where you can sell 0.0000-whatever for the max amount of the counter asset and not run into any math issues. It's just: how do you create that transaction in a decentralized way, where there isn't a third party that- boy, I sure hope they don't go down, or I can't move my asset. + +[58:00] So work in that arena is something that's super interesting to me and something that I'm particularly focused on. Good. Any follow-up questions around that? Otherwise we'll move on. So here's one: I want to buy a house with a stranger. This has been historically a horrible idea. Is there any way XLM or NFTs can alleviate all of the problems with co-ownership of homes or real estate? I don't know- Steve, Zack, thoughts around co-ownership with strangers on houses? I'll let Steve go first. I'm happy to as well. I was gonna say after you, but- the short answer to the question is no. I think you still fall out the same ways. I mean, there's going to be a contract of some sort between you. But maybe I'm being limited in my thinking. What do you think? I think that NFTs can solve a lot of issues with traditional real estate. I think- + +[59:00] But I think one of the things- and this is a spiel that I'm working on, so I'm going to test it here- it's like, with all these other networks, you know, decentralized, distributed, trustless systems- that's the highlight, right?
So, like, Bitcoin is trustless peer-to-peer payments; Ethereum is trustless decentralized applications, and you can, you know, build on top of it for financial stuff. Stellar is interesting because there's this inherent requirement for trust on the network, because it tethers not only digital assets but also real world assets. And so if you take that same principle to NFTs: there will be things that exist in real life that can exist in both worlds, but you will still need to have trust in whatever that custodial power is. So when I'm talking about NFTs, like, circumventing the title company- ideally, you know, the title companies would anchor these assets and just adapt to the space instead of being relegated out. It creates new + +[01:00:00] Economies for them. They could have better processes. Maybe that lowers the cost for even doing the signing table stuff: instead of getting everybody in a room to sign, they can just click a button and digitally transfer that. And so, while you still have a record of the physical thing in their vault, you now also have a digital representation that anyone can view on ledger. And I think that you have to have trust when it comes to real world assets, and there will always have to be a facilitator for that. But I don't think that means that you can't disrupt systems like housing now with fractionalized stuff, to the question's point. You already have projects on Stellar that are doing that kind of stuff, where it's like an apartment share- you can fractionalize that- and that's been a big use case that I've seen. There's, yeah, I think, a small handful of companies doing some cool stuff with that already on the network. So that use case is proof that this can work. But that's a different model completely than, + +[01:01:00] Like, something like: I just want to give, say, Tyler my house- here's the title, okay, bye. Like, it can't do that. We can do it with cars, for some reason, though- but the government still verifies that here in the United States. Yeah, there's reinvention of systems, which is great. I suppose I interpreted the question as: would I be better off living with the person by using NFTs? Oh yeah, I may have misinterpreted it as well. Then, yeah, it depends on the use case: are we, you know, gonna live in the house, or are we just using it as an investment? Because for investment, I think there is opportunity. But if you're just buying with a random stranger on Craigslist- that's, I don't know, they're still humans. Well, sorry, just to comment on that, Tyler- it gets really interesting, because if you look at ownership, if you take what Zack says and you create a system where the house is kind of containerized within an NFT- or it'll have to be a number of + +[01:02:00] NFTs- and you can then easily split ownership, you know, you can then take, you know, a 50th share of a house. Have that as part of your investment portfolio, have everything transparent- you know, that's interesting from a portfolio point of view. I think so too, particularly when you start, again, opening up to retail investors, or this idea of patronage, where you tokenize a restaurant or something and people who like that restaurant- or when you look at art: it's not so much an investment where I hope to resell this thing. I just want to support the people that I love and the art that they do and the things that they make.
There's a huge target for that, and it's not driven by greed, necessarily- at least it's not monetary greed. It's just: I like this thing, I want to see it exist and thrive. And there's a whole market out there that I think we're only beginning to really start to tap into that has nothing to do with me getting rich + +[01:03:00] And more with me enriching the artists and creators that I love. So we need to close out here. This has been a fantastic conversation. One last question here, for those who are not developers, or people that are maybe more on the art side of things- this would be a good question, perhaps, for Fred: how can they get involved? How can we start issuing art and music and begin to take advantage of marketplaces like what you're building, Fred? When are we going to get access? What's the plan for the future for non-developers? What's the path forward? So it's going to be live very soon- just fixing the last details, especially about trust, stress testing, and security, because we have created some kinds of contracts with pre-ordered transactions, and I just want to make sure that there are no issues with this. And this is going to be live on litemint.com. On the website, you will + +[01:04:00] Have tools to create- without any programming required- to issue your asset on Stellar. So you just need to specify the art, and if you want to, you can store it on IPFS, or you can store it somewhere else. And we are also going to run an anchor for you, so there will be a review. But, yeah, you will be able to issue your assets without any technical knowledge. Fantastic, very excited. Well, thank you. Any closing thoughts? Comments? I mean, I only have two closing things, the first to bounce off of Fred: I'd say keep an eye out for that- I'm eager to experiment with it after seeing it. There are also others, I think, around the ecosystem already that you can play around with that do, I think, account-based NFTs, and I don't have the URLs off the top of my head, but I'm sure you can dig around a little bit if you're part of the Stellar community. What I would say to do is, if you want to see more NFT projects on + +[01:05:00] Stellar, get involved in the conversations on the dev group about it and start talking to other people that are developing NFT-based projects, so some standards can get uniform across the ecosystem, so these different things can begin to interpret different assets that are published the same way. That would be my closing statement. Yep, absolutely- Stellar Global. Find us on Discord and start the discussion. That's a great place to land. I don't know if anybody wants to call out where they can keep up with you and the work that you're doing, and then we will close out. Steve, if you want to start: where can people keep track of the work that you're doing? Yeah, absolutely. Well, come and say hello at task.io. I like to be on Twitter- I'm at Steve Walker without an e at the end, so, Steve Walker- and great to connect, and I really appreciate you inviting me in today. It's been a + +[01:06:00] Great experience. Thank you. Super. Yup, it's been great to have you. Fred, and same question: so if you want to connect with the project, you can come and join our community- we have a Discord community, over a thousand members. Litemint.com- you can type it in your browser and you will get the invite link- and I'm there almost every day to talk. So if you have any questions, just don't hesitate.
And Zack, where can people pester you? You guys can tag `@stellarorg` on Twitter, and I will see every single thing that you have ever posted there, multiple times. And then also, you know, the regular places: Keybase. All right, super. And once again, I'm Tyler van der Hoeven. You can find me- t-y-v-d-h- on Twitter. That's the best place to find me. I also do hang out on Discord occasionally. This has been a ton of fun. Thank you all for joining. We will catch you later, hopefully, + +[01:07:00] Conversing around art. Thank you very much. See you all. Thanks. +
diff --git a/meetings/2021-04-22.mdx b/meetings/2021-04-22.mdx new file mode 100644 index 0000000000..37d46e6d68 --- /dev/null +++ b/meetings/2021-04-22.mdx @@ -0,0 +1,159 @@ +--- +title: "AMM Resource Economics and Routing" +description: "This overview explored two draft proposals to introduce automated market makers (AMMs) into Stellar Core, focusing on liquidity provisioning, DEX integration, and the economic and governance tradeoffs involved. The goal was to surface key design questions and guide further discussion." +authors: + - david-mazieres + - eric-saunders + - jonathan-jove + - justin-rice + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [legacy, CAP-37, CAP-38] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session marked the starting point for bringing automated market makers (AMMs) directly into the Stellar protocol. Rather than focusing on implementation details alone, the discussion framed AMMs as a new piece of core market infrastructure—one that could complement Stellar’s existing orderbook to improve liquidity, reduce friction, and make asset exchange more accessible across the network. The goal was not to finalize a design, but to surface the most important tradeoffs early, while both proposals were still in draft form. + +Participants compared two competing approaches, CAP-37 and CAP-38, each attempting to balance simplicity, predictability, and long-term flexibility. Much of the conversation focused on how AMMs should interact with the existing DEX, how costs and reserves should be allocated, and how much complexity should live inside Stellar Core versus being left to market participants. What follows captures the main themes that emerged as the group clarified priorities and identified open questions for continued discussion. + +This meeting introduced two draft proposals—CAP-0037 and CAP-0038—both aiming to add constant-product liquidity pools to Stellar so that users can provide or consume liquidity without maintaining standing orders on the orderbook. + +### Key Topics + +- Resource management and fee economics: how liquidity pools should be represented on-ledger, who (if anyone) should sponsor pool entries, how LP share trustlines relate to reserve requirements, and how to avoid “free riding” while keeping pool creation accessible. +- DEX integration strategies: CAP-37’s approach of continuously routing trades to the best marginal price across pools and the orderbook versus CAP-38’s simpler, deterministic choice between pool-only or orderbook-only execution, along with the implications for complexity, arbitrage, and user expectations. +- Pool behavior and evolution: handling deposits and withdrawals, rounding and slippage protections, the possibility of multiple pools per asset pair, and how parameters like fees or curve types should be fixed, governed, or extended over time. + +The discussion closed with agreement that AMMs represent a significant shift in Stellar’s market model and that further debate on these design choices should continue on the developer mailing list before either proposal advances. 
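For intuition, here is a minimal sketch- illustrative only, not taken from either CAP- of how a swap against a constant-product (x·y = k) pool is quoted, and of the all-or-nothing execution choice CAP-0038 describes. `orderbookReceive` is a hypothetical stand-in for walking the book's offers:

```js
// Amount of Y received for selling `dx` of X into a constant-product pool
// with reserves (x, y); a proportional fee on the input keeps the invariant
// x * y from decreasing across the trade.
function poolReceive(x, y, dx, feeBps = 30) {
  const dxNet = dx * (1 - feeBps / 10000); // e.g. 30 bps = 0.30% fee
  return (y * dxNet) / (x + dxNet);
}

// CAP-0038-style choice (sketch): execute the entire trade against the pool
// or the entire trade against the orderbook, whichever delivers more.
function allOrNothing(x, y, dx, orderbookReceive) {
  return poolReceive(x, y, dx) >= orderbookReceive(dx) ? "pool" : "orderbook";
}
```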
+ +### Resources + +- [CAP-0037 – Automated Market Makers (Draft Proposal)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) +- [CAP-0037 Discussion Thread](https://groups.google.com/g/stellar-dev/c/Ofb2KXwzva0/m/LLcUKWFmBwAJ) +- [CAP-0038 – Automated Market Makers (Draft Proposal)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) +- [CAP-0038 Discussion Thread](https://groups.google.com/g/stellar-dev/c/NLE-nprRPtc/m/GHlmlE7ABwAJ) + +
Video Transcript + +[00:00] Hello and welcome to the Stellar Open Protocol Discussion. So for those of you watching at home, I just want to give a quick overview. The goal of these meetings is to talk about and plan for changes to upcoming versions of the Stellar protocol. We go over Core Advancement Proposals, or CAPs for short. These are open source specs that describe new features designed to evolve the protocol to meet ecosystem needs. All CAPs begin with a draft, and they go + +[01:00] Through a rigorous process before they're actually implemented in Stellar Core. And even then, they don't actually hit the network until validators vote to upgrade to the new version of the protocol that implements them. So, crucially, not all drafts make it across the finish line, right? Changes to the protocol require deliberation and diligence, and meetings like this are just one part of the CAP life cycle. So today we have two CAPs to discuss, and again, these are both drafts- [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) and [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md)- both of which have the same goal, which is to introduce automated market makers- or AMMs for short- at the protocol level. So automated market makers are a simple way to provide the liquidity necessary for asset exchange. You may have heard of things like Uniswap. They've shown that automated market makers are effective at providing easy-to-access liquidity at scale. They essentially allow for the creation of liquidity pools, which are simple and non-interactive and which can attract large amounts of capital and enable high volumes of trading. So the goal of both these proposals, [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) and [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), + +[02:00] Is to add automated market makers to Stellar in order to improve overall liquidity on the network. They differ slightly, but they have the same goal. So two things before we start. One: this is a technical discussion, very technical, so if you want to follow along, I suggest that you read the CAPs and the recent developer mailing list threads about them, both of which are linked to in the event description. Two: AMMs are a big topic, and the goal today is not to get to the end of the discussion about them. Rather, we're just beginning the discussion. So we'll start to bring key questions about these proposals to light so that the discussion and debate can continue on the Stellar developers Google group- just getting started. That group is open participation, which means that if you have an opinion or some great insight about AMMs to share, you too can join in. Just follow the link in the event description. All right, without further ado, let us start to talk about AMMs. So again, we have these two proposals, but they have a lot of questions in common and then they also have some differences, and we're sort of going + +[03:00] To get into both today. And the first sort of topic that we wanted to talk about was a question about reserve requirements and how they work in each of these CAPs. So [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) has the pool creator, the person who creates the liquidity pool, pay for the pool ledger entry, and [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) does not increase the reserve requirement.
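For context on the reserve discussion that follows, a quick sketch of Stellar's documented minimum-balance rule; the 0.5 XLM base reserve is the network setting at the time of the meeting:

```js
// An account must hold (2 + subentryCount) * baseReserve lumens; every ledger
// entry it owns (trustline, offer, signer, data entry) adds one subentry.
const BASE_RESERVE_XLM = 0.5;

function minimumBalance(subentryCount) {
  return (2 + subentryCount) * BASE_RESERVE_XLM;
}

// Under CAP-38's model, the pool-share trustline is the entry that carries
// the reserve: holding just that one trustline means (2 + 1) * 0.5 = 1.5 XLM.
console.log(minimumBalance(1)); // 1.5
```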
So I guess the first question is: which model is better? Is there any concern about either model? So, reserve requirements- well, this isn't very deep, but it's easier to reduce reserve requirements than to increase them. So, you know, if there's doubt, I guess it'd be more conservative to have the reserve requirement initially. Maybe we can go over the motivation for not having reserve requirements, because most ledger entries do. + +[04:00] Yeah, it can sound just kind of weird. Anyway, so in [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), that version of the proposal does not have the reserve requirement, and the main motivation is, basically, it's not remotely obvious who should own this thing in the first place. Like, why should any given account be responsible for paying for this, which can't be used by a single person in the first place- it's intrinsically a multi-party thing- and being the person who created it isn't necessarily a meaningful concept. But more importantly, it's not that there's no reserve requirement at all, because the pool can't exist if there isn't at least one trust line for the pool share, I should say. So basically, it's kind of like, when you're the first person to create the pool, you get it, like, half off. You basically get two entries for the price of one. But then, you know, if there were a + +[05:00] Thousand people using the pool, it would actually be like a thousand people paying a reserve for one single entry. Or if there were, you know, ten thousand people, it would be even more like that. So that's kind of the way that I see this motivation. All right, so there's no way to abuse this, because you can't just create an infinite number of these- you'd need an infinite number of trust lines as well. So, in other words, basically the trust line sort of serves as the reserve requirement, rather than creating a new reserve requirement specific to a liquidity pool. Right, that's a pretty good explanation of it. Yeah, I mean, I think that's a reasonable explanation. Yeah, so it'd be a bummer if we changed our minds and later wanted to, like, increase it, but that does seem reasonable. + +[06:00] I mean, if that's the case, maybe we're done talking about reserve requirements- like, does anyone have anything else to say about them? Yeah, we sort of started with the easiest topic just to warm up. So if that one's that easy, we can move on, I guess. Like, maybe- so the reason I added that question in the first place was because I was thinking that they are potentially very large. I mean, the pool- basically, to describe it, you need to basically describe which assets are part of that pool. Right now we're only discussing constant product pools as part of this, but you can imagine having much more complicated types of pools, so maybe the + +[07:00] Amount of data that is tied to this one pool could be much larger than what we are talking about now. For example, I think in the Compound type of world, they have, like, multi-asset pools- more than two assets actually in your pool- and I think if you have, like, Uniswap v3 type of stuff, now you have those bitmaps that are in there as well. I mean, the cost of that becomes much larger. So I don't know, you know- does that count really just as a single ledger entry, or do we need to do something better? If right now we make it basically implicitly attached to a trust line,
We're kind of stuck with that model forever, as opposed to maybe, if it was separate, we could imagine- okay, if we change our mind and it has to be more expensive, because, you know, those things are gigantic- we have that as a + +[08:00] Separate thing. But maybe that's fine because, yeah, for the creator, we can maybe give the first trust line free or something- kind of equivalent, right, as part of this. I mean, the thing is, there shouldn't be, like, a gazillion of these things, right? So I can imagine, going forward, it might turn out that we really only want a few hundred of these liquidity pools in the network, and so we would turn it into, like, an auction to see which asset pairs are, you know, worthy of having the automated market maker. Or, you know- the thing is, even if it costs, like, 100 lumens to create one of these things, it just wouldn't be that big a deal, because if it's actually a useful liquidity pool, that cost would be amortized over a large number of users. So I guess it doesn't seem like it really matters, because I guess we could also say, + +[09:00] Because I guess we could also say later on, like, hey, the price just went up, but we'll grandfather in the existing liquidity pools or something. So what happens to the pool when the last trust line disappears? I think in both proposals it's supposed to be automatically removed. I'm just wondering if there's some kind of spammy edge case where people create and destroy trust lines quickly to get some kind of ephemeral pool advantage. I mean, I guess it could, but the nature of the pool is that it's not really useful if it's not sitting around, right? And you can't be a participant in the pool if your trust line doesn't exist, because you wouldn't be able to hold the shares. So it's hard to see how you could get value from that. I do think that Nicolas has a good point about the multi-asset pools, you know, like Balancer-type pools, + +[10:00] And I think in that kind of a setting, we could always say, you know, okay, well, we're gonna charge reserves for those types of pools, but not for these types. You know, I think there's a lot of flexibility. You know, we already have a mechanism by which we can do this, which is sponsorship, so we wouldn't have to invent anything new. It's just a matter of: do we think that's even necessary at this point? For me, the answer is, like, probably no. I mean- to me, yeah, I agree that right now we don't need it. It's more like: is it possible to add it later if we have to, if we're not careful? Basically, because in the CAP-38 world, I guess we would have to force people to wrap their trustline creation with some sort of sponsorship + +[11:00] Thing, or maybe sponsored by default, I don't know. Yeah, maybe that's fine. It just changes the cost of creating a trustline, so maybe that's okay. I mean, what would the change look like? In general, I'm not a fan of complicating today's thing because of a potential tomorrow, but you'd want to, like, add one field to the operation that's the extra fee for creating the trust line or something. I think it would just be a union type, so I think it wouldn't actually change the contract. So it's actually fine.
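A rough sketch of that sponsorship wrapping, using the JavaScript stellar-sdk's existing CAP-33 operations. The accounts here are hypothetical, and an ordinary asset stands in for the pool share, since the proposals' pool-share trustlines would be a new trustline type:

```js
// Sketch: the sponsor wraps the user's changeTrust between begin/end
// sponsoring-future-reserves operations, so the sponsor's lumens back the
// trustline's base reserve instead of the trustline owner's.
import { Operation, Asset } from "stellar-sdk";

const sponsoredTrustlineOps = [
  Operation.beginSponsoringFutureReserves({
    sponsoredId: "G...USER", // hypothetical account whose reserve is sponsored
    source: "G...SPONSOR",   // hypothetical sponsoring account
  }),
  Operation.changeTrust({
    asset: new Asset("PLACEHOLDER", "G...ISSUER"), // stand-in for a pool share
    source: "G...USER",
  }),
  Operation.endSponsoringFutureReserves({ source: "G...USER" }),
];
// A transaction carrying these operations must be signed by both
// G...SPONSOR and G...USER.
```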
Yeah, if we ever have a new type of pool that's like those gigantic things, and we believe there should be no free riding on it, then we can always add this- like, those would be sponsored. I mean, right now you have the field that allows them to be sponsored, except we don't use it. + +[12:00] All right, I think we can move on, okay. So the next question is about DEX integration, which I think is a bigger question. So both proposals give access to liquidity from both pools and the DEX, but they vary in their approach. Jonathan, will you just walk us through the difference between [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) and [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) in their approaches to DEX integration? Sure, thanks. So basically, both of them put the access to trading with liquidity pools, or automated market makers, inside of path payment, and that's the only access point to this functionality. There is no, like, trade-only-with-the-orderbook or trade-only-with-the-AMM. It's just one operation, which is more like trade. Now, the difference in how these work is that in [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md), OrbitLens's proposal, the idea is that you're going to + +[13:00] Try to always get the best price. So you might imagine a situation where, let's say, you have an order and the price is at, like, a hundred dollars or something for whatever you're trying to buy, and the marginal price on the AMM is at 95 right now, but the AMM has really small reserves. So you might say, like, okay, well, first I'm gonna trade against the AMM until the price hits a hundred dollars, then I'll go and cross the orderbook and take that first order. And then, you know, now the next price is at 105, and so I'm going to trade against the AMM until it goes to 105, etc. And you do both, stepping back and forth between the two, depending on which one gives you the best marginal price. In [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), to kind of avoid that complexity, the approach is basically: just, what if I took the entire trade against the AMM? Or, what if I took the entire trade against the orderbook? And just give you whichever of those two produces + +[14:00] The best price. So you would never do, like, some AMM and some orderbook- it would either be all orderbook or all AMM. Sorry- best price or best volume? So I'm saying best price in the context of, like: imagine that I'm trying to trade a specified amount- I wanna, you know, buy three thousand widgets or something. I'm gonna do whatever it takes to get the best price for those three thousand widgets. If I can't get 3,000 widgets on one of the two options, then that's not an option. Wait, sorry, so they're all or nothing now? Oh, I see, yeah- that's right, okay. But so in the [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) example, where it always tries to get the best price and may execute part of it via the AMM and part of it via the orderbook- what does it do? It splits the amount that you want into a bunch of different path payments, + +
I mean it's kind of hard to explain what it does at like an implementation level, but it's all it's like still going to come through as a single operation and just inside the implementation it's going to know to you know kind of do this incremental approach, so it's completely invisible to the user, right. And so in both proposals can you have arbitrage opportunities between the orderbook and the AMM. So in the [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) proposal there are no arbitrage opportunities between the two because they get resolved automatically as you trade. In the [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) proposal there can be. But the reality is that with like spreads as wide as they are on the orderbook today, if there were ever a situation where there was an arbitrage opportunity between the AMM and the orderbook, there would also be an arbitrage opportunity between the AMM and any external orderbook for the same product. + +[16:00] Probably I mean like I can't, give you 100 guarantee on that. But you know, I think, it's pretty common to have like spreads on the orderbook today, greater than half percent. So I would think that you know if you're looking at any more liquid place in the world. That would probably get you into an arbitrage universe there. First, with the [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) implementation, would it be possible to build an attack where you constantly switch between the orderbook and the pool, like a with a single path name? Like is that a concern? What kind of it? Like what would this attack do? Exactly? Right, it would take the minimum amount from the pool and it decides that the orderbook has the next best price and then after that it would switch back to the pool and just keep going back and forth, while in [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) you would just choose one or the other right. Do you see what I'm saying? + +[17:00] So this is I mean like there can only in. the world where there's exactly one orderbook and exactly one AMM, there can only be as many switches as there are offers in the worst case. So like if there's 10 000 offers, there could be ten thousand switches, but there couldn't be twenty thousand. That would be impossible. Does that make sense? Do I need to convince you of that? I could, no, I think that makes sense. Okay, yeah, the worst case behavior is to DEX not, you know, that's always the answer. One comment I have is that whatever we implement in core, we have to be able to model it outside of core. Like Horizon has to be able to calculate the same thing to give you the best path payment, and presumably third parties would also want to do that. So we should bear that in mind. Like it can't be completely opaque. + +[18:00] What do people kind of feel about? What's the good approach here? I think one important question here is: like [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) seems to suggest that the DEX is a valuable thing and should be maintained and the markets will be maintained alongside. Because if the, if everything just migrates to the automatic market, makers then have this like complexity of interleaving requests, of no leaving offers seems to be like doesn't make sense, right. So the question is like: do we think that the ecosystem is going to migrate like are we? 
Are there use cases that automatic market makers are not + +[19:00] Providing a good solution for as much as the DEX is? I think there are, but it'll probably be per asset pair anywhere, like if I had to predict, I would say that for you know, if I want to like you know, do an ipo of my company on Stellar. I'm gonna just basically like put the orderbook there. I mean it'll be offer my shares for sale, like on the orderbook or something right. And if I'm trying to do like forex exchanges or whatever, then probably the AMM is going to be best. So it seems unfortunate that kind of like: the more intermingling of these abstractions, the harder it's going to be to, like you know, improve each individual one in the future. The more complexity there's going to be and it's not clear what the payoff is going to be. The market will do better with impermanent loss, right? So when there's knowledge outside of the blockchain, the market will take advantage of it much more effectively than the AMM. Yep, seems like an argument to have it. + +[20:00] You know I'm not saying get rid of the. existing DEX. I'm just saying, you know, have both, but don't have them terribly intermingled like they sort of. Ideally, improvements to the mm don't need to, like you know, change the logic of the orderbook, as you know, as much as possible, and vice versa. So then it would just be a user choice more than anything, whether to use the AMM or use the DEX- a market maker choice. The user would just get the best price available for a path payment right, but someone who's trying to make a market could either use our AMM, or if it's not good, then they could use the DEX. Comer, I would kind of ask you like, can you think of anything where people would want to use, like would have a good justification for using both on the same product? Because I agree with David? Like I can think of lots of cases where the AMM will be better than the DEX, or lots of cases where I say the DEX, I should say the orderbook. + +[21:00] Lots of cases where the orderbook would be better than the AMMs, but I can't think of a case where people would be obviously indifferent between the two. Well, I think what could happen is that you know that AMMs are used to like bootstrap a market, but that then, when the market is like you know, a really big deal, then some big market maker will come along and they would rather have the control of setting explicit limit orders, because they can set, have a lot of depth and they can kind of adjust to x, you know, external factors that affect the price, and so they would prefer that and so I could see the AMM is way to bootstrap a market and then like a full on market maker using the orderbook once it makes sense for them to do that, because there's enough volume that is profitable for them to make the market in the traditional way. But [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) would satisfy that without the extra complexity. So at the moment it doesn't seem like + +[22:00] Anyone's arguing that the extra complexity of [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) is worth, has a benefit that makes it worth while really, is that true? Is there anyone who thinks [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) and the super interleaved process is substantially better? 
But I think the argument for that behavior is actually what orbit wrote, I think this morning on the dave mailing list, which is trying to limit, like, the number of times you have, like this, arbitrage opportunities between emm and the existing orderbook. The thing is that I don't know if you have any data really to kind of say that this is going to be + +[23:00] A big problem inside, like this is like inside a specific, like, even on the DEX overall. Like you have pairs that I mean yeah, are assets that represent the same thing, right, they're just issued by different people and that doesn't solve. I mean, [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) doesn't solve that. So that's why I'm kind of a in the camp of like starting with this simple thing, first see how that goes and then adjust, like based on that. Because, yeah, this interleaving- so far on, you know the type of things I'm saying- it's actually very hard to even come up with like environments or things that you want to happen. If we want to change the, what the mm is doing, like if it's something more complicated than constant product, I'm not even sure + +[24:00] We can, yeah, explain what we exp, what we expect to happen, like we want the best price, but when you have like grounding and all those issues that come in play, it actually becomes really complicated to try to reason about this. So I would delay that decision as much as possible. That's my opinion and there's a chance that you know we'll want a third thing. That's neither an AMM nor the existing DEX, and so you know it's nice the idea that, okay, we can just have multiple approaches to this coexisting and they don't have to be tightly integrated. That leaves us more flexibility in the future. Great. Anything else about DEX integration, or should we move on to the next topic? is pool parameter updates. + +[25:00] I think sid was working on something related to this. Is that true? Yeah, I wrote something on the that mailing list yesterday. I can talk about that if you like. Yeah, will you talk us through it? Yeah, so it. It's. What I propose is an extension on what orbit? Pros, where you include the fee in the ledger header. So, instead of just having one fee, you have, you know, all the fees you may want to use. For example, if you had a couple different constant product pools with different fees, you have all them, the ledger header and, depending on which pool you're interacting on, you would pull the fee from there. This will allow you to upgrade the fees without a protocol update. The issue here, though, is that you know, the more pools we add, more parameters we add wherever we. If we store this in the ledger head, it'll get polluted wherever we decide to store it somewhere else. + +[26:00] Like it may become an issue. So that was what I proposed. I think Niko had a different proposal related to storing the parameters in the ledger pull key, I believe, and maybe you can talk about that right. So I guess, like, what I described was not necessarily where to start. I mean like there's like there are actually different concepts here, because, let's see, it is the other question after. So just maybe one thing that will help me, can we just talk about what parameters there might be that we would need to store other than right? So like the type of things that I think people were talking about. So, for the obvious one is: we have a proof, a puffy right. 
[27:00] That's the one that is really the only parameter in a constant product pool. We have other parameters also, like minimum amounts that we want people to be able to deposit- basically a unit; you can think of it as a unit of deposit or withdrawal that helps with guaranteeing certain levels of liquidity, basically, when you do all those operations. And I think some of those, when you have the more-than-two-assets type of pools- those actually have formulas that are getting even more complicated, because now you have different weights for those assets. I think those are the ones that are known right now. + +[28:00] Right, I think for, like, a Uniswap v3 type of pool, you have a step size also. I mean, yeah, it's up to the future. And there are a few other things- for example, Curve has this amplification factor, and that would be something else that would have to be stored if we were to use that. Is there an issue with storing all of this in the ledger header? I'm assuming- well, there are actually two questions here. Actually, I guess, when I wrote this question, what I was thinking at the time was more: there's this question of, if you store the parameter as a global, as opposed to something that is attached to the pool itself, well, what that means is that if you change the value of that parameter- so if you change the fee, for + +
And you know, I don't think anybody would be super offended if all the validators plus went through this huge review process to agree like this is gonna be better uniformly for everybody. So we should make it the de facto. But isn't Nico's point that when you make the second pull, if you wanted to change the fee in a second pull, it would affect the fee in the first pull? Are they coupled like in Siddharth's proposal? And I'll let him talk more about it. But he's offering like these three different fear. In his example he's offering these three different fee tiers and you can imagine being in the mid tier fee. You know + +[32:00] You could have the low tier one, the mid, two one, the high tier one, and then you know the validators vote like, okay, we're gonna move the mid tier one from point five to point six or something like that. So it would still be possible to be in a world where there are multiple pools and still have multiple fees, but have them globally controlled so that existing ones get changed anyway. Not saying that I support that, but I think that's possible. But, Siddharth, you can step in here if I'm not hitting this correctly. No, that was right and that, like, the nice thing here is that you and I don't know if this actually matters, but you control the number of possible pools, right? I don't know if that matters for, like liquid liquidity fragmentation, right, like if we allow, if we end up allowing like 10 or 15 constant product pools, like will that be an issue? Or do we assume that everyone will move to the most optimal pool? Optimal pool, like I'm not sure what will end up happening there. + +[33:00] I tend to think that people would have probably eventually settled down onto the right thing, but I also don't think that's a particularly nice user experience. It's like decision paralysis when you get to the grocery store and you're like, wow, there's a 37 brands of corn flakes. Which one am I gonna buy? Yeah, why do you need that in your life? So it's. It does sound like if we were to make any of the parameters mutable, it would only be the fee, right, like if? Like we shouldn't change anything like the curve or anything like that for any of the. You know, if we add a new pool type? Like those should not be mutable, right, I don't think those should be mutable only from like I don't know their. Nikolas shared this very + +[34:00] Cool defect report that colton, long ago, had shared with me and I shared with nikola, nikon. He posted it onto sellerdev- which is about changing the curve parameters other than the fee. I say defect report, I guess I should say vulnerability, whatever you want to call it. And if you're interested more, you should review that because it's very scary. I almost feel like this shouldn't be a configurable parameter. And if we do want to introduce a different fee, we can might as well just introduce a new type of pool. Like I think that a new type of pool, like I think that doing like, even if you don't change it for existing pools, I'll still, you know, I'll put + +[35:00] Some money into a pool one day, get you know one specific fee and the next day, after the configuration, change. It's a safe type of pool, but now I'm getting like a different type of fee, so that there's like it's just not explicit enough. A couple of people have said: why don't we just introduce a new pool, or why don't we do this? Who is the we in this situation? Does that make sense? I think there are two different ways one could take this. 
One way to introduce new types of pools would be that it's a protocol change. You know, like, if we're gonna go from having only constant product pools to having constant product rules and I don't know something curve like or balancer with weights or whatever we'd have to. You know the Stellar protocol developers, you know + +[36:00] People who work on CAPs, whatever would have to make that happen. Another way to look at we- this is an idea that nikola had a long time ago about having this like, this notion of like, instead of letting people choose any fee they want or any whatever they want, there are some configurations that the validators have voted on are good configurations. When you're creating something, you're basically choosing from that menu of good configurations and so in that setting we could be like validators. You know they voted on saying that, like now, the fee can be both 0 3 and 0 05 you know. So, depending on the context, I think we could be either of those cool things. Are there more questions about parameters? I mean, I feel like there's still a little bit more thinking there that we can sort of do in conversation on the Stellar dev list. I don't want to move on, though if there's important things left to be said, + +[37:00] But I feel like we're sort of at a good, point to move on. Right, I agree like the next one is actually related and maybe more interesting actually you want to explain it. Yeah, multiple pull support. Yeah, so it's basically the two proposals are actually quite different there. Like [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) actually allows one asset pair to have arbitrary variations of pools for that one pair, so the variation can be on the curve type. It can be on any, really any other parameter, like we just talked about fee. So you can imagine having two pools on the same, two constant product pools, let's say, on the same asset pair, but one is going + +[38:00] To be, you know, super low c and the other is like going to be a high fee and then you know, the market kind of gets inside whatever capital. E7 identifies pools using the pool type and then, related to the thing we are talking about earlier, the fee is global. So at this point the only way you can have multiple pools on a given asset pair is if they defer on type. Do we want to have more of the first kind or the second time, right like which one is maybe more interesting? We want to have, like those, + +[39:00] A few number of options right in a for a given type, or do we think that one type enough? I think there are some like fundamental problems with the notion of like one instance of a pool per wait, hold on, actually, let me make sure I understand. Do you mean one instance of a pool per pair? Yeah, of course all this is fair, right, yeah, so one in? I think there's some like fundamental denial of service flaws, with one instance of a pool per pair, which is the question of who gets to choose what that pool will be like. Is it going to be the hyphy one? The low fee one is going to be curve. You know, I think, basically, if there can be multiple types of pools, we probably have to say that all of them can exist for the same pool to avoid having some arbitrary- and by arbitrary you can + +[40:00] Always take that to be adversarial decision maker- like: oh, this is a super volatile asset pair where, like you know, constant product with a high fee would be the best. 
I'm going to put in, you know, a curve with a super low fee to make it so nobody can use this. Don't want that to happen, but here, like you have the, you know, using [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) as, like the like maybe the as a talking point. Like the earlier we were talking about fees, do we think that we want to have more than one few? I mean, I think that we probably want to have more than one tea and not just force like people to pick, like, oh yeah, this is the right fit for you, because + +[41:00] I think that different, asset classes actually have different like risk profile and volatility profile and therefore you want to have a different fee. You know, like you basically want the like crypto to crypto assets to have a different, probably a different fee than the stable coin to stable coin. Type of fair right. So yeah, you can let them compete and, like you were saying, like you can't let a single person decide on which one is the first one. So then how do you let the market decide? I think that's kind of what I'm getting. So it would bring market dynamics to bear on these, given uncompeting pools. + +[42:00] I don't really like. For me, the reality is I don't really see what we're choosing between. Like the choices in my head are kind of like there is exactly one type of pool and we apply that uniformly across the board, or there's one you know. Maybe you say like okay, like you can have different curves, but all the ones with the same curve have the same fee, whatever, like whatever you kind of take that model at, you're kind of saying like I'm gonna try to fit all the pegs into a single shaped hole. If you take that approach. And if you don't take that approach, then you're basically saying, well, like I'm gonna let the market decide what's the non stupid thing? And I don't like forcing pegs into holes that don't that they don't fit in. So for me it seems kind of like a very kind of obvious choice. But I'm wondering if somebody sees the argument that goes in the other direction, like: is there somebody who's like no, like John, you're definitely wrong. + +[43:00] Well, I think the argument I held before on trying to force everybody into the same group is: you avoid, like, splitting the liquidity. But I'm not. That's not my opinion either, you know. So I can't really support it, but that I just wanted to call it out. That's actually one of the argument that people call out. Yeah, I mean, I guess for me it kind of depends a lot on what the set of options are like. You can imagine a world where, like we just imagined that it was possible to create a constant product market maker with a fee at every thousandth of a percent. There's no difference between. I mean they're not literally identical, but there's no material difference between a fiat point three percent or point three zero one percent or point two nine percent. + +[44:00] They're kind of like exactly the same. And I think in that world you probably do have liquidity fragmentation between people doing effectively the exact same thing differently, whereas, you know, in a world where you're like, okay, like you have two fees on your constant product- one is like 0 05 and one is one percent- for totally different asset classes, I think there's going to be like a very obvious place that people are going to want to put their money in those kinds of cases and there won't be a lot of blurriness. 
I'm not 100% sure, but I think the amount of liquidity fragmentation depends on how distinct the options are. But even in that case, if market dynamics work, wouldn't people be drawn to the pool with the lower fee? Right, if they were the same, but one even had a tiny bit lower fee- and then the two pools would be competing, and that would actually push fees lower. Isn't that what you would expect? Not necessarily. Like, for example, + +[45:00] If the impermanent- like, if prices are moving a lot and so there's a lot of impermanent loss, then people might make more money from the higher fee, even if people traded less as a consequence. And the other factor is, if everybody's in the high fee pool, then- and like, let's suppose a world where there's 99- sorry, 100- equivalent market makers. 99 of them are in the big pool, which has the high fees, and the one other guy is in the small, low-fee pool. In, like, the [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) proposal, maybe some small trades would execute against the small pool, the low-fee pool, but all the big trades would execute against the big, high-fee pool, because there'd be less slippage there. And so maybe that person would end up making less money by being in the low fee pool, and then they decide to join the high fee pool anyway, right? So I think it's not obvious. There's a bunch of different ways these kinds of things could shake out. + +[46:00] So, I mean, maybe the answer to this is no, but I'm just wondering if there's any- it just seems better to have only one pool, you know, other than the fact that you would have to agree on the parameters. But I'm wondering if there's any way to kind of select the pool objectively. Like, basically say that, you know, if there's multiple pools for an asset pair, you just pick the one that has the highest product, or highest product plus reserve, where people can pledge some reserve or something, so you can let people sort of bid on what they have. Because what you kind of want is- you want, like, you know, in the kind of Stellar fashion of, like, it's just to everyone's advantage to do what everybody else is doing, which is kind of like the whole philosophy of Stellar on some level- you kind of would like market forces to decide this. And so to the extent that somebody- a lot of people- decide that you want to do something else, then it should, like, make sense to move over to that something else. But I mean, that's- John briefly mentioned this, my menu idea, + +[47:00] Earlier. So that's kind of what I proposed earlier: basically, try to kind of find a good middle ground, right. Like, you don't want to have, like, a single pool, you know, where you're forcing to this one rate across all assets- I think that's too prescriptive. On the other hand, if you allow people to set any price, you know, any fee and all that, you end up with, like, this huge- like, you can't pick a pool, basically. Like, there are an infinite number of pools. 
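To put rough numbers on the fee-versus-slippage trade-off described above, here is a small worked sketch (an editor's illustration only: the pool sizes, fees, and the `quoteOut` helper are invented for the example, and neither CAP specifies this code):

```ts
// Constant product quote: pool holds reserves (x, y); a trade of dx pays
// a fee on the way in, then trades along the invariant x * y = k.
function quoteOut(x: number, y: number, dx: number, feeBps: number): number {
  const dxAfterFee = dx * (1 - feeBps / 10_000); // fee taken from input
  // Solve (x + dxAfterFee) * (y - dy) = x * y for dy:
  return (y * dxAfterFee) / (x + dxAfterFee);
}

// A deep pool at a 1% fee vs a shallow pool at a 0.05% fee, same 1:1 price.
const deep = { x: 1_000_000, y: 1_000_000, feeBps: 100 };
const shallow = { x: 10_000, y: 10_000, feeBps: 5 };

// Small trade: the low fee wins; slippage is negligible in both pools.
console.log(quoteOut(deep.x, deep.y, 10, deep.feeBps)); // ≈ 9.900
console.log(quoteOut(shallow.x, shallow.y, 10, shallow.feeBps)); // ≈ 9.985

// Large trade: depth wins; shallow-pool slippage dwarfs the 1% fee.
console.log(quoteOut(deep.x, deep.y, 5_000, deep.feeBps)); // ≈ 4,926
console.log(quoteOut(shallow.x, shallow.y, 5_000, shallow.feeBps)); // ≈ 3,332
```

With options this distinct, each trade size has an obviously better home, which is exactly the "not a lot of blurriness" argument above.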
So the menu idea is to have validators actually describe the combinations that you can use on the network, and the combinations would be a combination of, for example, like, constant product, and you can have, like, low, medium, high fee, or something like that. And then, when you want to deposit assets in a + +[48:00] Pool, you pick which item on the menu you want, right. You can say, okay, this is like a stablecoin one, so I'm going to pick this configuration with this parameter, right. Okay, so here's- I think, you know, this is probably not workable, but just to, like, get some design point out there: suppose that, you know, people can sponsor these liquidity pools using lumens, and basically you can propose any value, any parameter you want. However, in order to introduce a new liquidity pool when there's already one for that asset pair, two things have to happen: one, you have to pledge more lumens than the previous pool has pledged, and two, you have to have at least as much liquidity- the product has to be at least as great- in terms of the assets you put in there. So this would allow people to change when everybody wanted to change, + +[49:00] But it would sort of make it a little bit sticky. I get what you're getting at here, but, like, a kind of just practical implementation type question before we talk about this on the merits: it's just, like, what about the case when you have pools where, like, the product is a different number, the invariant is a different number? You know, the Curve invariant is some really complicated thing that's really gross to write down, and so they're not obviously comparable. There would be some requirement that, for any pool type we introduce, there needs to be a depth metric, which is a number, and where somehow we believe that, in general, deeper markets are better than shallower ones. + +[50:00] How would you- I guess, like, what would, at like a high level, what would an implementation of all of this kind of look like? In an abstract sense- like, we need some new kinds of entries and stuff- like, what is it like? How do you say that you're voting for one of these pools? Like, what does it mean if you're voting for a pool but the pool doesn't actually work yet? How do all these things kind of work at a high level? Yeah, I guess you have some number of- let's say you have some number of candidate pools, and maybe, in order to create a candidate pool, you have to pledge sort of twice the base reserve that's pledged for the previous one, or something, so we can't get, like, an infinite number of these. And then, for the candidate pools, there's some amount of capital that's pledged, and so if one of the candidate pools suddenly gets more depth- right, there's some amount of capital that's pledged- and if at + +[51:00] Some point one of the candidate pools ends up with more depth than the existing pool, then people would switch over. So the basic assumption here is that, like, sort of 90% of the people participating in a liquidity pool would say, hey, like, actually, we want to go to a lower fee or something, right. And so, once everybody wants to do that, they can make it happen. But, like, you know, a random person who's just trying to disrupt the existing market would have to, like, amass a lot of capital and essentially end up making a more liquid market in order to disrupt the existing market. It's a kind of interesting idea. I'd have to think about that a bit. 
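At a high level, the replacement rule being sketched might look something like this (purely illustrative: the types, the sqrt(x·y) depth metric, and the comparison rule are assumptions made for the example, not anything specified in either CAP):

```ts
// Illustrative types only; nothing here is in either CAP as written.
interface CandidatePool {
  pledgedLumens: number; // lumens the proposer locks up behind the pool
  reserveA: number; // deposited amount of asset A
  reserveB: number; // deposited amount of asset B
}

// One possible depth metric for a constant product pool: sqrt(x * y),
// the geometric mean of the reserves. Any other curve type would need
// its own comparable depth number for this scheme to work.
function depth(p: CandidatePool): number {
  return Math.sqrt(p.reserveA * p.reserveB);
}

// A challenger displaces the incumbent only if it out-pledges it AND
// offers at least as deep a market, so disrupting an existing pool
// means actually building a more liquid one.
function replaces(challenger: CandidatePool, incumbent: CandidatePool): boolean {
  return (
    challenger.pledgedLumens > incumbent.pledgedLumens &&
    depth(challenger) >= depth(incumbent)
  );
}
```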
I, like, definitely have some concerns along the same line as somebody mentioned- I think Eric mentioned- about, like, this ephemeral trust line type of thing. You can imagine somebody putting their money where their mouth is, in, like, a huge amount, and then, you know, + +[52:00] Five ledgers later, being like, I just did that to screw you guys all up, and then just pulling their money back. On the other hand, if what they did is provided people, like, really low fee transactions for five ledgers- like, is it so bad? Like, they could have done the same thing by placing offers on the orderbook, right? It's a fair point. So, we only have five minutes left, and I think that's fine, because I think a lot of these questions- we can continue to sort of talk about them, or to think about some of the suggestions that were raised, and deal with them on the Stellar dev mailing list. There were two other topics that we didn't really get into. One was compliance, and one was rounding. Do we want to take a second to talk about either of those right now? Would that be useful, or is it too much to take on either one of those topics in the last four minutes? It seems hard, and for me- I mean, I'm, like, very interested in talking about these, but in detail, + +[53:00] It just seems like it'll be unsatisfying to get into them now. Okay, that makes sense to me. I mean, I think then the question is: what are the next steps after this meeting? I feel like it is to continue the discussion on Stellar dev, but are there other follow-up actions that need to happen in order to make that possible? I'm still trying to understand whether we've narrowed down the scope of which pools can exist and which ones can't, like, for a given asset pair. Have we agreed we can only have one pool? Or could we have multiple pools with different kinds of curves for that asset pair, for example? Like, I would love to write that down. Well, I think what we were talking about was that- I think we agreed that there would be more than one pool per asset pair. + +[54:00] What we were talking about last was more about the governance that goes with introducing a new pool. Like, you can do it in many ways. The good news is that we don't necessarily have to solve that, I think, in this version, because right now we're introducing only one constant product, and I'm not sure we want to introduce, in the first version, the support for more than one rate. Right, like, we can basically, for now- like, I think that's what [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) says, or even [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md). That is, there's only one rate, and it's probably fine for the very first version, but it's interesting to think about how to change that in the future. 
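For concreteness, the validator-voted "menu" idea discussed earlier could be imagined roughly like this (the specific fee values and the shape of the configuration object are invented for the sketch):

```ts
// A hypothetical network-wide menu, set by validator vote. The entries
// here (constant product at 0.05%, 0.30%, 1.00%) are examples only.
type PoolConfig = { curve: "constant-product"; feeBps: number };

const NETWORK_MENU: PoolConfig[] = [
  { curve: "constant-product", feeBps: 5 }, // stablecoin-to-stablecoin pairs
  { curve: "constant-product", feeBps: 30 }, // a "default" 0.30%
  { curve: "constant-product", feeBps: 100 }, // volatile crypto-to-crypto pairs
];

// Pool creation would validate against the menu instead of accepting an
// arbitrary fee, bounding the number of possible pools per asset pair.
function isAllowed(requested: PoolConfig): boolean {
  return NETWORK_MENU.some(
    (c) => c.curve === requested.curve && c.feeBps === requested.feeBps,
  );
}
```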
Right, if you'd be willing, David, I'd be, like, very interested to see, like, a high level write-up of what you were describing, so I can + +[55:00] Think about that more deeply. Yeah, maybe I can just draft something by email or something. Yeah, that'd be fine. It doesn't have to be super formal. Sure. And I noted, for the future: there was the idea of a menu, right, that was sort of validator-determined. There was this idea of, you can replace by better, with some depth metric. Were there any other contenders for ways to do this? Well, that was the question in the first thing, right: that was, don't even allow having more than one. Right, yeah, which is a third option. Sure. Okay, there's also the free-for-all option. Anarchy. I love the anarchy. Every time I'm listening to this, I'm like, we're talking about anarchy, or we're talking about, like, Soviet-style communism, or we're talking about just, like, free market capitalism. I feel like there's ideologies behind all these choices, you know. John's like, there should only be one kind of cornflake. + +[56:00] Right, I can go for, like, three kinds of cornflakes, just to be fair. Cool. Yeah, you want the- you know, you're on the special occasion, it's your birthday cornflake, right. Man, I hope you have something like blueberry pancakes on your birthday, not cornflakes. I don't even know. Okay, that's time. Thanks, everybody, and thanks, everyone who's watching at home. Again, this discussion will continue on the Stellar dev google group, which you can find a link to in the actual event description. There's already a lot that's happened there, and we will continue to have these discussions in these meetings to start to hammer out more of these proposals. But again: two proposals, a lot of discussion, many things that we covered today, but many more things to cover. Look there to see what's happening. This conversation will happen both sort of asynchronously there and synchronously here in the future, + +[57:00] And we really appreciate everyone for being here. Thanks, everyone. + +
diff --git a/meetings/2021-05-06.mdx b/meetings/2021-05-06.mdx new file mode 100644 index 0000000000..171f27e86f --- /dev/null +++ b/meetings/2021-05-06.mdx @@ -0,0 +1,125 @@ +--- +title: "Asset Authorization and Clawback in AMM Liquidity Pools" +description: "This discussion continued the CAP-37 and CAP-38 AMM work, focusing on how issuers enforce authorization, revocation, and clawback-like controls when assets sit inside protocol-level liquidity pools. The group compared two enforcement models, debated whether issuers should opt in to pool participation, and flagged claimable-balance edge cases for follow-up on the mailing list." +authors: + - david-mazieres + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: + - legacy + - CAP-18 + - CAP-37 + - CAP-38 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This discussion picked up the AMM thread from the prior session and zoomed in on a regulated-asset question that any pool design must answer: when an issuer revokes authorization, how does that enforcement propagate through liquidity pools so the asset can’t continue trading indirectly? + +The group compared two enforcement models. The first model treats pools like offers: revoking authorization on an asset trustline would also trigger redemption of any pool shares that depend on that trustline. This preserves issuer expectations and avoids a new operational workflow, but it pushes more complexity into Core (data lookups, indexing, and the mechanics of finding and redeeming affected pool positions). The second model keeps the mechanics simpler by shifting enforcement to the pool share trustline itself: issuers would revoke authorization on pool trustlines to force redemption. That approach reduces Core complexity, but it introduces a behavioral “foot-gun” for issuers who already have production compliance workflows built around asset trustlines and offers. + +A recurring theme was whether authorization-required issuers should be forced to opt in to allowing their assets in liquidity pools at all. Several participants argued for an opt-in flag (at least for auth-required assets) so issuers can’t accidentally end up with assets trading in pools while relying on legacy revoke behavior. Others suggested defaults that keep pools broadly usable, especially for the many assets that are not auth-required, and noted that a blanket “inhibit AMM” flag may not match any real issuer demand today. + +The conversation also surfaced a practical edge case: forced redemption may require creating claimable balances when the redeemer cannot receive the underlying assets (missing trustlines, insufficient limits, liabilities from offers, etc.). That behavior could be surprising for applications, so the group left the details open and encouraged more design iteration and feedback on the Stellar-dev mailing list. + +### Key Topics + +- Compared two issuer-enforcement models for AMMs: (v1) revoke the asset trustline and automatically redeem any dependent pool shares (offer-like behavior) vs (v2) revoke authorization on the pool share trustline to force redemption. +- Discussed an issuer opt-in flag for liquidity pools (especially for auth-required assets) to avoid “silent” behavior changes for existing compliance workflows. +- Noted tradeoffs: v1 is the cleanest contract for issuers but adds Core/data-layer complexity; v2 is simpler to implement but requires issuers to learn a new enforcement step. 
+- Flagged claimable-balance edge cases during forced redemption (missing trustlines, insufficient limits, liabilities) and left mechanics/reserve handling for follow-up on the mailing list. +- Parked broader AMM questions (multiple pools per pair, updatable parameters, reserve requirements/UX signaling) as ongoing work tied to CAP-37/CAP-38 threads. + +### Resources + +- [CAP-0018 – Fine-Grained Control of Authorization](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md) +- [CAP-0037 – Automated Market Makers](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) +- [CAP-0038 – Automated Market Makers](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) +- [Stellar Developers Mailing List (stellar-dev)](https://groups.google.com/forum/#!forum/stellar-dev) + +
+ Video Transcript + +[00:00] Hello everyone, and welcome to the Stellar Open Protocol Discussion. So for those of you who are watching, just a quick reminder: the goal of these meetings is to talk about and plan for changes to upcoming versions of the Stellar protocol, and so we go over Core Advancement Proposals, or CAPs. These are open source specs. They describe new features designed to evolve the protocol to meet ecosystem needs. So CAPs go through a rigorous process before they're actually implemented in Stellar Core and then put forward for a validator vote in a new protocol version, and so + +[01:00] Before the network upgrades, validators have to agree to accept that new version of the protocol that actually includes the implemented change. This meeting is just a part of that process, right? It's where people raise questions and dig into technical details and work together to think through proposals, and we're broadcasting it so that everyone out there can see the gears turning. But I also want to say there's a complementary, asynchronous discussion on the Stellar dev mailing list, and we encourage anyone who has thoughts about changes to the protocol to join that mailing list and participate in the discussion. There's a link to it in the event description. I'm going to talk about it more in a second, but first I just want to say: last meeting we started talking about two similar proposals, [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) and [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), both of which suggest a method to introduce automated market makers at the protocol level. So, automated market makers, AMMs: they allow the creation of liquidity pools, which are simple and non-interactive, and which can attract large amounts of capital and enable high volumes of trading. So they're a simple way to provide the liquidity that's necessary for asset exchange, + +[02:00] And that's the goal here, right: to implement the change to the Stellar protocol aimed at improving overall network liquidity. So today we're going to continue the discussion about AMMs that kicked off about a month ago on the Stellar dev mailing list. This is a technical discussion, so if you want to follow along, I suggest reading the CAPs- both of them- and the recent developer mailing list threads about them, which are linked to in the event description. So, one thing I just want to mention real quick is that this morning OrbitLens, who is the author of [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) and who unfortunately can't be here today, I don't think, did raise some issues about the ongoing AMM discussion in a blog post, and there's a lot there. I don't think anyone here has had a chance to fully process it quite yet, but I imagine that people will have thoughts, and what makes sense to me is to discuss the points he raises on that Stellar dev google group I mentioned earlier. That way we can sort of discuss things in a public forum that's set up for participation, rather than trading blogs back and forth. The key point: we are currently discussing these ideas. We haven't settled on the best implementations for bringing AMMs to Stellar, + +[03:00] And we'd really love to hear the concerns folks have on the mailing list, where we're working together to find the design that serves users. So here's where we are right now. 
Okay, the last meeting we talked about a few things, mostly in the context of [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md). We talked about liquidity pool reserve requirements, the relationship between the DEX and AMMs, whether to allow pool parameters to be updated, and whether or not to support multiple pools for the same asset pair. Now, there are still open questions about all those things, right- an ongoing discussion, again, on that Stellar dev mailing list. But today we're actually going to focus on something a little different. It's another question that needs to be answered by any AMM design: basically, asset authorization and revocation, and how that fits into AMMs. So, Siddharth, in the [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) thread you suggested two different models for handling authorization and revocation, and I'm just wondering if, to start us off, you can sort of walk through those two models briefly and kind of outline the pros and cons of each. Yeah, of course. Yeah, so I'll go over + +[04:00] Two approaches for auth revocation and forcing a redeem of the pool shares back to the owner. So, in the first approach, revoking authorization on an asset trust line will redeem all pool shares that trust line has a balance in. So this will also require us to first make sure that asset trust lines cannot be deleted while they have a balance in the pool, and it will make the data layer more complex, with additional queries and indexes for the lookup, because we have to look up all the pool trust lines and pools. The good thing about this approach is that it works like auth revocation with offers, so no additional steps are required when an issuer is regulating a specific asset. The second approach is a little different. The way it works is that an issuer of an asset in a pool can revoke auth on the pool trust line, instead of the asset trust line, to redeem shares held by the pool trust line. So any issuer of any of the assets in a pool can do + +[05:00] This, and then, once that happens, the pool shares held by that pool trust line will be redeemed back to the owner. This approach will require the issuer to look up all pool trust lines that it wants to revoke auth from, which isn't that big of a deal. But the bigger issue we wanted to address is that the issuer will also need to be aware of the additional step during auth revocation, because now, not only do you have to revoke auth on the asset trust line, but you may need to revoke auth from the pool trust line. So John had an idea here to require the issuer to opt in to liquidity pools, using an account flag. So, once that happens, then the issuer should be aware of nuances related to liquidity pools. The nice thing about this approach is that it's simple to implement- doesn't require changes in the data layer. The current [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) proposal takes the second approach, but we should obviously get some thoughts on this and + +[06:00] See if that's what we want to finalize on. One thing I'll tack on to what Siddharth said is an idea from Nicolas: that perhaps this opt-in behavior for liquidity pools should only apply to the assets that are already authorization required. This was also discussed on the mailing list, but I think this is an interesting addition. Interesting. 
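For readers following along, the two enforcement models just described can be caricatured in a few lines (a toy in-memory sketch: none of these names or types are real Stellar Core structures or operations):

```ts
// Toy in-memory model, with invented names.
interface AssetTrustline {
  account: string;
  asset: string; // canonical "CODE:ISSUER" string
}
interface PoolTrustline {
  account: string;
  assets: [string, string]; // the pool's underlying asset pair
}

const poolTrustlines: PoolTrustline[] = [
  { account: "G-EXAMPLE-ACCOUNT", assets: ["USD:G-ISSUER-1", "EUR:G-ISSUER-2"] },
];

function redeemPoolShares(t: PoolTrustline): void {
  // In the real proposals this pays the underlying assets back to the
  // owner (possibly via claimable balances); here we just drop it.
  poolTrustlines.splice(poolTrustlines.indexOf(t), 1);
}

// Model v1: revoking auth on the *asset* trustline behaves like it does
// for offers today. The protocol finds every pool position depending on
// that trustline and redeems it: one step for the issuer, extra lookups
// and indexes for Core.
function revokeAuthV1(t: AssetTrustline): void {
  poolTrustlines
    .filter((p) => p.account === t.account && p.assets.includes(t.asset))
    .forEach(redeemPoolShares);
}

// Model v2: the issuer revokes auth on the *pool share* trustline
// directly. Core stays simple; the issuer's compliance workflow gains a
// step it has to know about.
function revokeAuthV2(pool: PoolTrustline): void {
  redeemPoolShares(pool);
}
```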
You know, if you're not auth required, then there's no concerns about revoking stuff anyway, really, and in that kind of a setting- I think the majority of the assets on the network are not authorization required, and so most things would just kind of transition seamlessly, which I think is a cool advantage. But I think there are some pros and cons to that. + +[07:00] I mean, is it worth just outlining the pros and cons to that? I mean, I think the major con is that it's just more complicated. Like, when you're trying to figure out what's gonna happen, basically, it's like, okay, I have to look at this flag, but I also have to pair it up against this other flag, and I think that's kind of the major thing. Like, oh, you know, I'm looking at some asset- can I put it in a liquidity pool? It's not as obvious in that case. So I think that's probably the biggest drawback of that kind of an additional step. And I think Leigh was trying to say something a second ago, so I'm gonna let him jump in. Yeah, sorry, I was muted. How would it work if the asset was not auth required, but then the issuer decided to enable auth required, + +[08:00] In terms of the ability to participate in pools? So, like, when adding auth required, would the issuer need to also indicate that the asset can continue to be used in pools? It's an open question that I hadn't even considered yet, so it's a good question. I think that's another aspect of the complexity of all of this. Ideally, the way I would kind of like it to work is that these things kind of work- what's it called- the same way that auth required works today, which is that, as I've said many times, the name is very kind of misleading, and it's really more like: what's your default authorization state? And the liquidity pools thing could work the same kind of way, where it's like: what's the default authorization state for a newly created pool share trust line? And so then it could be like, the default state is equal to the OR of 'is this flag explicitly set' + +[09:00] And the NOT of auth required. Yeah, I think the reason I ask is because, like, we're talking about adding that to make it less of a foot-gun, for issuers to not have to know about how liquidity pools work if they're already auth required. But then the flip side is true: for an asset that's not auth required, where the issuer doesn't know about liquidity pools, if they suddenly make it auth required, then they need to know about that flag- yeah, if they're already allowing liquidity pools to be used. So, do we have any assets that are auth immutable but not auth required? Because if so, it seems like it'll be kind of unfortunate if you couldn't use those in AMMs. + +[10:00] No, you see, nobody does that. Nobody says, I promise I'm not going to revoke this asset, I'm going to make it auth immutable. Because that seems like the primary point of auth immutable is to, like, promise that you're not going to suddenly turn on the auth required flag, right? I mean, like- to me, and I think I mentioned that on the mailing list, the degenerate case of this is actually XLM, right? It's basically auth-not-required, immutable. So I think, in my mind, it has to work the same way. Right, for XLM- this is what- you know, this type of assets, I think we want to have them on the network. They are basically, like, the purest from a, you know, crypto asset point of view. 
+ +[11:00] Tomer, it's my understanding that, like, the auth immutable flag in general is just, like, not highly used- perhaps not really anybody is using it at all, under any circumstances. What's kind of the actual status of that right now? Yeah, that's correct. The auth immutable flag is almost not used at all. I think maybe one issuer- I need to look that up. But usually, when issuers want to make a statement about the asset being locked down, they actually go the remove-signers way, to make the issuing account untouchable. So actually, my comment would apply to that case too. If anybody sort of created non-auth-required assets and, like, you know, nuked the signing weights on the issuing account, it would be a bummer if those assets couldn't work with the AMM. I would probably want, you know, a default-on thing, unless + +[12:00] There are no such assets. So the question is, like, do we expect such assets to exist? And, you know, Nico said something very smart, which is, like, this is like the crypto asset in its purest form, right? But to some extent it's somewhat of a myth. And if you look at, you know, a lot of popular tokens in, like, the ERC-20 universe, there is almost no such thing as an immutable asset anymore. Like, a lot of ERC-20 contracts are actually proxies, so that they could be upgraded if, you know, you discover, like, malicious, you know, backdoors or whatever. So, you know, there's almost always some sort of, you know, oversight. It might be some, you know, consortia via multisig or something like that, but it's very rare- you know, almost non-existent- to have assets + +[13:00] In, like, the pure, you know, untouchable form. And so I guess the other sort of question about the current state of the ecosystem is, let's say that we kind of flip the polarity, such that there's a flag that's off by default, but you could just, as an issuer, set it- it says, like, don't use this in AMMs, or something, right? Do we think that there are any people out there who, like- they have an asset that they really care is not used in an AMM, but they sort of lack the confidence or awareness to set such a flag, if we give them, like, you know, a month's notice or something? I can't think of such assets. Because, I feel like- you know, of course, we want the AMM to be usable as widely as possible, and we also want to be in a position where, like, we don't have people sort of bitterly complaining that we pulled the rug out from under them. But beyond that, + +[14:00] You know, to me that suggests we want some kind of, like, default-on thing, basically. I mean, Tomer, from the perspective of, like- what if we- so, like, the motivation for adding this kind of a flag in the first place kind of originated from this notion of, like, what about people who wrote some code and have something running on their back end, and it's, you know, issuing assets and receiving requests to revoke them and whatever? And these guys, they don't realize that something has changed out from underneath them, and, you know, people start putting stuff into liquidity pools. They revoke, but it doesn't actually really do the job, because those assets are still in the liquidity pool- they're still being traded. Do you think anybody would even care? Because if nobody cares, and nobody would really be in this situation, like, this isn't a problem. It doesn't matter if this situation might arise a year in the future. 
If you show up a year in the future and you start doing something, well, I expect you to know what the current + +[15:00] State of what you're doing is. It's only for people who already have operations, and if you don't think any of those people would care, then maybe we don't have to jump through any of these hoops at all. So I think both this edge case and the edge case that, like, David brought up are very edgy- like, I'm having a hard time thinking about anyone that will actually be affected by this in the ecosystem. I think that, you know, if we had a lot of assets like that in the ecosystem, then I would say that David's kind of, like, on-by-default would make sense, because they would probably want to participate in these liquidity pools, but then we'd have a problem with the mechanics of how to revoke, and, kind of, like, you know, your point comes into play. In reality, I just don't think it matters all that much. + +[16:00] I think what you suggest, John, is probably more complete, in that if anyone is relying on existing behavior, they don't get borked when, you know, they try to revoke, you know, like, a new asset- you know, liquidity pool stuff. Yeah, I just don't think it matters all that much, like- I vote for what you said, John, for, you know, having it off by default, so that people know that there's, like, a special handling required, just, like, in general. Because I think that's kind of one of the distinctions we were trying to draw in the conversation: for auth required assets, you do want it off by default, but the ones that are not auth required can be on by default. + +[17:00] That if we had it on by default, like, there's probably nobody who would mind, right? That's what I was confused about too. Like, Tomer, can you clarify, actually: are you saying that we should remove the flag, because you don't think anybody would actually want the flag to be set, or are you saying that we should do one of the things that either I was suggesting, David was suggesting- any of us were suggesting? I'm a little confused. So, I thought we were discussing a dedicated flag that is on by default- or, sorry, off by default- for auth required assets. Yeah, so I was supporting that. So, in other words, if you have an auth required asset, you have to designate it. You have to flip the flag to say this auth required asset can now be used in liquidity pools. Yeah, that is indeed what I agreed with. Yeah, + +[18:00] The opt-in, yeah. I think, David, that would cover your case too, because if you're auth required, you need to have at least- oh no, you could just have low signing weight only. I guess it also depends- yeah, if you can set this flag with low signing weight. I mean, I guess I'd rather, you know, I'd rather stuff be on by default, right, for the same reason. Like, auth required isn't the default, you know. I would like it if 'inhibit AMM'- or whatever, 'require AMM off'- weren't on by default for anyone. Just- it's very easy to convince ourselves that there's edge cases where, like, somebody is not gonna be paying attention and it's going to run into huge problems. But, you know, if these are edge cases, then + +[19:00] I'd rather just have the functionality on by default. It also simplifies the logic, you know, a tiny bit, and it gives more possibilities. 
You know, maybe there actually is some asset that you, like, really don't want to have used in an AMM for some reason, but you don't want to auth require it. So, correct me if I'm wrong, but if they do have auth required right now, and we turn it on by default, then the authorization semantics as understood by the issuer essentially get, like, invalidated or changed, right? Oh, I see. So you're suggesting, like, a big database migration on the protocol upgrade- which actually is another thing we can do: it's an inhibit flag that's going to be off by default, but, for starters, for everybody who's currently auth required, we actually set the flag when we introduce it. Is that what you're- I wasn't thinking about that. + +[20:00] But- and in that situation, if someone showed up new, to issue a new asset, and they decided to make it auth required, it would automatically be on? It would be- no, they'd have to decide. Like, the same way auth required is off by default, the AMM inhibit flag is going to be off by default. And so, when you set one, you might or might not want to set the other. These are, like, you know- it's part of your decision tree. It's the same as you have to decide whether you want to set auth immutable- which, you know, most people don't- or, like, auth clawback, or whatever. Like, when you want to issue an asset, these are the decisions that you need to make about your asset. And so, if we kind of, you know, clearly document that you have to make, like, three different decisions about how your asset behaves, then I feel fine going forward. And it's just a question of: do we have, like, existing assets where, like, people would not have the confidence or awareness that they need to, like, change this flag if they care about it, and then, you know, like, a month after we announce + +[21:00] This and the thing goes live, they would run into problems. Can I flip this discussion over for a second now and say, like, these problems don't really exist as much, if at all, if we did the other authorization model- like, the version one that Siddharth listed, which is basically, like, make it work exactly like offers work today: you get your auth revoked, now your pool shares are all going to get revoked too. Obviously, this has some, like, implementation burden on our side. It would make us have more things to test, and whatever. But if it's justified, then maybe we should consider that. But that's not semantically equivalent. So the question is- I actually see these things as almost orthogonal. Like, there's two different things. Like, one is, like, what happens when, like, you know, particular people + +[22:00] Get sanctioned or whatever. And another question is: do you want a tighter, like, control of, like, the market in your asset? And they seem like different questions, and I could imagine different asset issuers answering different ways for the two questions. So, John, with the v1 you described, yeah, does that mean issuers do not have the ability to opt out of liquidity pools? We could still add that feature if we wanted to, but that feature would no longer be one of these kind of, like- you could make this work and everything would kind of look transparent from an issuer's perspective- like, different things would be happening on the network, but the same operations that they were doing to control accounts would all still work. 
So, in the version one approach- I agree with David- the questions are orthogonal. Like, how do we want to control- like, + +[23:00] You have this choice on how to control the auth, you make a certain choice, and then the other degree of freedom is totally open. But if you go with approach two, then you're not in an orthogonal space anymore, because then you have this backwards compatibility problem that appears. But we could kind of answer the question separately- like, maybe we want the flag either way, so then it matters a bit less. Yeah, so I'm reading Siddharth's outline here, and I'm leaning towards v1, in terms of, like, a more sensible contract that's in line with what we have today. You core folks need to, you know, express how bad this is in terms of implementation, because I think, from a contract perspective, this just, you know, is more in line with what you have today. It's like- + +[24:00] It's variably bad- I guess is the best way to characterize it. In a world where we only have two-asset liquidity pools, it's, like, probably not so bad. But, like, if we were in a world where we were like, oh, you know, like, Balancer-type pools with eight assets are actually great- then it starts becoming kind of problematic. The reason it becomes kind of problematic is that we have to be able to efficiently extract all the pool shares that have to get revoked, but they might correspond to any of the assets in the pool. Let's say there's eight, just for the sake of argument. Maybe in pool one it's the third asset, and then in pool two it's the seventh asset, and pool three it's the first asset. So you end up breaking out all the assets into separate columns in your databases, and you've got an index on every column. Right now, an average table in our database has either zero or one index. So, like, for example, if we were in this world, that would add, you know, eight times as many indices as I think the trustline table currently has. So that's kind of the perspective here. There's probably big performance + +[25:00] Complications as soon as you add eight indices. You know, every index operation is basically a table operation. So, I mean, to be fair, you don't necessarily need to do that many indexes. Like, you can do it in a way that's a little more like, you know, map asset to- it's a multimap, basically. Like, creating a separate table to query against. Yeah, it's a separate table for indexing, purely for indexing. Yeah, but it might be a lot more efficient than that- might be more efficient, more complicated- because that's one B-tree, right, as opposed to many B-trees. Yeah, so, I mean, like, that could be a better implementation. But then we have to maintain an extra table. So, yeah, it might be more efficient, probably more complicated. Look, v1 makes more sense from a contract perspective. Is there someone that disagrees with that? + +[26:00] Okay, I agree. Okay, so, you know, in that case, you know, Nico, you folks need to put your foot down on whether this is, like, acceptable, or the complexity in Core just, you know, makes it so that we need to make the less sensible contract decision. Justin, maybe we need to let the core folks huddle up and figure out- like- yeah, I feel like that's true. Are there any other questions about this topic that we want to make sure to cover? Or + +[27:00] Is that our stopping point? 
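The "separate table" alternative mentioned just above is essentially a multimap from asset to the pool trustlines that reference it: one extra structure to keep in sync, but a single index regardless of how many assets a pool holds. A toy version with invented names (real Core would do this at the database layer, not in memory):

```ts
// Key: canonical asset string; value: set of pool trustline ids that
// include that asset. Kept in sync on every pool trustline create/delete.
const poolsByAsset = new Map<string, Set<string>>();

function indexPoolTrustline(poolId: string, assets: string[]): void {
  for (const asset of assets) {
    if (!poolsByAsset.has(asset)) poolsByAsset.set(asset, new Set());
    poolsByAsset.get(asset)!.add(poolId);
  }
}

// Revocation-time lookup: one query against one index, even for a
// hypothetical eight-asset (Balancer-style) pool, instead of eight
// per-column indices on the trustline table.
function poolsToRevoke(asset: string): Set<string> {
  return poolsByAsset.get(asset) ?? new Set();
}

// Usage sketch:
indexPoolTrustline("pool-1", ["USD:G-ISSUER-1", "EUR:G-ISSUER-2"]);
poolsToRevoke("USD:G-ISSUER-1"); // -> Set { "pool-1" }
```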
Core folks go back and think about whether the more sensible contract approach- v1- whether the implementation is a showstopper, a blocker. Or is there more, I guess? The only question I'll still ask here is, like: do we still want the flag? Because, like, we could always add the flag later, if people want to opt out, and that would just mean that we have less work to do and we can deliver this sooner. Oh, I can reach out to some issuers and see if that's a showstopper in any way. Intuitively, I can't think of any issuer that will, you know, passionately want to disable their asset from participating in liquidity pools. But I'm not sure. + +[28:00] To the extent I would imagine people doing that, I could imagine a situation where what you really want is, like- you believe that there's sort of, like, two or three high quality assets that are KYC'd and whatever, and so you're willing to have your asset have a market against those, and you don't want just, like, random people buying your asset, where you can't sort of trace one hop and ask who that was. So is there any possibility to add- to give issuers control over what assets they're allowed to be matched with? Like, to have a trust line on the liquidity pool itself, as opposed to an individual participant? That's an interesting concept. My kind of a priori intuition on this is that, like, you can't control that today anyway, because you could already do it against the DEX- like, the existing orderbook- and if that's the kind of feature you wanted, you're probably using the + +[29:00] [CAP-18](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md) semantics- which, for people listening, is, like, this fine grained authorization control, and it's mapped up against some SEP, which number I can't remember right now. So I can't really see how adding this feature at the protocol level would- you're muted, David. You're still muted. None of us can hear you, David. Sorry- there could be, like, you know, a 'trade authorization required' flag or something that basically applies to both the AMMs and the DEX, and it says, like, I actually have to authorize any other asset that my asset is traded against- so, making it apply even to the existing orderbook. Yeah, I mean, interesting. If nobody's asking for this, then, you know, it's probably not worth doing. But this actually seems more useful than just, + +[30:00] Like, blanket inhibiting AMMs. It's definitely an interesting idea- not one that I've heard before. I think Tomer can find out what people really want, and that's probably a good starting point. But if you're gonna do that, and it's retroactive onto the existing orderbook, I'd say let's do it as a separate kind of proposal, in a separate protocol version, probably just for the sake of expedience. But then, if we think that actually solves people's problems better than the existing ones, then maybe we don't need to worry as much about the existing ones. You know, unless there's anybody saying, like, you know, don't let my asset be traded in AMMs, but do let it be traded on the DEX- like- somebody- yeah, which is weird. Like, it's hard for me to imagine what that use case is, but maybe it's there. I don't know. 
+ +[31:00] Yeah, I think, given that, you know, we don't have this feature around the DEX right now, and, you know, we haven't had issuers saying, you know, I'm not going to issue this asset because I'm afraid it's going to be traded on the DEX- you know, I think we're probably in the same boat with liquidity pools. Okay, maybe let's call it today. Well, I mean, in fairness, there's probably people who say, like, I'm just not gonna issue my asset on a blockchain, because I'm scared of blockchains. And so this would be more like, we could come to them and say, hey, you've heard of these two issuers who are, like, you know, reputable in the non-blockchain financial world? Well, guess what: you could make your asset available only for sale to, like, customers of, like, these two banks or whatever. And that would sort of- it would be a way of kind of attracting people who are otherwise afraid of blockchain, by making it less unknown. + +[32:00] But again, this would be, I guess, a separate proposal. So, right- so, back to this issue, for this particular proposal: it seems like where we've ended, at least with today's discussion, is, v1 makes more sense as a contract, but the question is, what does the implementation look like? And that's something that the core team will think about. Seems true, right? Seems like where we've ended up. Okay. I have a question about auth revocation. It's not really to do with v1 versus v2- I think it applies to both of them- and it's from Siddharth's email where he presented these two different options. It says in the email that if the pool shares can't be sent back to the owner- if that is impossible- they're going to get stored in a claimable balance for + +[33:00] The owner. Is that something we're still considering? And if so, I'm just a little concerned. That seems like it would be very unpredictable, and applications, like- they may not really handle that edge case. So, yeah, I'm just curious about that. So, we're still thinking about how that case would work, like, you know, with regards to claimable balances. And, you know, we initially considered, like, always putting it into a claimable balance, but there are still questions around, like, how reserves would work here, and things like that. So it's still open. We're still thinking about it. If you have an idea how to avoid this problem, I'd be, like, super enthusiastic about it. I've had, I think, three calls with Siddharth where we've, like, tried to think about how you could avoid doing this, and we always run into some case which pretty much forces you to create a + +[34:00] Claimable balance, as it stands today. But maybe there's some other thing we can constrain that we haven't figured out yet. I'm super open to ideas here. Leigh, does that answer your question, more or less? Yeah, I think- I would like to understand what all the cases are where the claimable balance would be required. I can maybe think of one, but I'm not really sure I completely understand the problem. So, some examples of some cases where you would require them, just off the top of my head- it's probably not going to be complete. But one case is, you're in a liquidity pool and you've removed the trust lines for the corresponding assets. So, for example, it's like, I'm in + +[35:00] A USDA-USDB pool, but I don't have the trust line for USDB anymore, because all my USDB is in the pool. 
There's no way to return the money to you other than creating a claimable balance. Another case would be if your USDB trust line still existed, but, because the pool has moved in your favor- which is kind of unlikely for these particular assets, but whatever, imagine- then you no longer have sufficient limit to receive the additional amount of USDB that you would have gotten. They're all kind of variations on this problem. Or it's like: oh, I also have some orderbook offers, and there are buying liabilities, and then that causes me to have insufficient available limit to receive the balance- stuff like that. Okay, thanks. I'll have to think about it as well. There's also a bit of discussion about this on the mailing list. + +[36:00] It's, like, quite a few posts from the bottom now, but one of the things Nicolas was chewing on was this notion of capping the available balance- like, the available supply of an asset- because right now it's actually possible for an asset's total supply to exceed int64 max, which is interesting. I went and looked, and there were, like, 30 assets- all with funny names, or largely with funny names- that have this condition, and so I don't think anybody serious has this problem. And if we avoided this situation, it might make a lot of other things simpler, and it might avoid some cases with liabilities and stuff. But, like, it's definitely not, like, an obvious win. There are still some other things that can happen. We probably need additional constraints in addition to that. But you can read more about that on the mailing list, because I don't even remember what was in that post entirely. + +[37:00] Great. Any other questions or topics that we want to go over today? Or should we just sort of mull over what we've discussed so far and bring it back to the Stellar dev mailing list? Cool deal. So, yeah, everyone watching, thank you for joining us for this protocol discussion. Again, the thing that we keep talking about, the Stellar dev mailing list- there is a link in the event description, and it really is the place where we start to hash this out. Right, like, all of these discussions are happening- there are discussions about both [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) and [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md)- and we definitely want to hear from people as we start to think through these designs, in order to come up with solutions that really meet the needs of the ecosystem. So, really appreciate it. Please join the discussion. Thanks, everyone who joined here on this panel, and I'll see you next time. + +
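For reference, the forced-redemption fallback cases walked through above can be sketched as one small decision function (illustrative only: the field and function names are invented, and the reserve mechanics were explicitly left open on the call):

```ts
// Rough decision logic for returning an underlying asset during forced
// redemption. All names here are invented for the sketch.
interface Receiver {
  hasTrustline: boolean; // trustline for the underlying asset still exists
  limit: number; // trustline limit
  balance: number; // current balance
  buyingLiabilities: number; // liabilities from open orderbook offers
}

type Outcome = "direct-payment" | "claimable-balance";

function redeemUnderlying(r: Receiver, amount: number): Outcome {
  // Case 1: no trustline at all (e.g. it was removed once the whole
  // balance moved into the pool) -- there is nothing to pay into.
  if (!r.hasTrustline) return "claimable-balance";
  // Case 2: a trustline exists, but limit minus balance minus
  // liabilities leaves too little room to receive the payout.
  const availableLimit = r.limit - r.balance - r.buyingLiabilities;
  if (availableLimit < amount) return "claimable-balance";
  // Otherwise the assets can simply be paid back to the owner.
  return "direct-payment";
}
```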
diff --git a/meetings/2021-06-17.mdx b/meetings/2021-06-17.mdx new file mode 100644 index 0000000000..6a84e7cd67 --- /dev/null +++ b/meetings/2021-06-17.mdx @@ -0,0 +1,125 @@ +--- +title: "CAPs 37/38 Interleaving and Adding AMMs in Protocol 18" +description: "This session focused on advancing automated market makers (AMMs) at the protocol level, comparing CAP-37 and CAP-38 approaches to liquidity, order routing, and deployment risk. The group aligned on CAP-38 as a practical first step, emphasizing simplicity, faster shipping, and the ability to layer more advanced features later." +authors: + - david-mazieres + - eric-saunders + - jed-mccaleb + - jonathan-jove + - justin-rice + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [legacy, CAP-37, CAP-38] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This discussion examined how automated market makers could be introduced into the Stellar protocol to improve liquidity and trading efficiency. Participants compared two competing proposals, CAP-37 and CAP-38, and debated tradeoffs around complexity, arbitrage behavior, network performance, and ecosystem readiness. + +The conversation converged on prioritizing a simpler, shippable solution that delivers value quickly while preserving flexibility. CAP-38 was viewed as a reasonable foundation that can later support more advanced routing and interleaving features if needed. + +### Key Topics + +- Comparison of **CAP-37 (interleaved routing)** vs **CAP-38 (best-venue routing)** for AMM integration +- Agreement that interleaving is theoretically superior, but significantly more complex to implement +- Recognition that CAP-38 can act as a foundational layer, with CAP-37 potentially built on top later +- Assessment of arbitrage risks, including increased failed transactions and Horizon load +- Conclusion that arbitrage is unavoidable and better addressed incrementally after deployment +- Emphasis on avoiding over-optimization and “perfect vs. good” tradeoffs +- Validation that network guardrails and validator governance can mitigate worst-case scenarios +- Consideration of UI and ecosystem readiness, especially around path payments +- Optimistic outlook that AMMs could reduce order book traffic and improve overall efficiency +- Formal vote by the CAP committee to move **CAP-38 into Final Comment Period** + +### Outcomes + +- **CAP-38** unanimously approved to advance to Final Comment Period +- **CAP-37** remains in draft, with guidance to revise it as a potential extension on top of CAP-38 +- Next steps include community feedback, validator review, and eventual protocol inclusion + +### Resources + +- [CAP-37 – Automated Market Makers Proposal](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) +- [CAP-37 – Developer Mailing List Discussion](https://groups.google.com/g/stellar-dev/c/Ofb2KXwzva0/m/LLcUKWFmBwAJ) +- [CAP-38 – Automated Market Makers Proposal](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) +- [CAP-38 – Developer Mailing List Discussion](https://groups.google.com/g/stellar-dev/c/NLE-nprRPtc/m/GHlmlE7ABwAJ) + +
+ Video Transcript + +[00:00] Hello everyone, and welcome to the Stellar Open Protocol Discussion for June 17, 2021. So this meeting, like all these meetings, is a part of the process for making changes to the Stellar protocol, and we live stream these meetings so that anyone interested can follow along. In these meetings we go over CAPs, or Core Advancement Proposals. These are open source specs that describe new features that are designed to evolve the protocol to meet ecosystem needs. + +[01:00] And before we start, I do want to point out that this is a technical discussion, a very technical discussion. So you may want to review the CAPs we're about to go over and the recent developer mailing list threads about them, both of which are linked to in the event description. Before we dive in, I also want to give a quick process overview. So, to allow for discussion and feedback collection and vetting, each CAP goes through a specific life cycle, right: it's proposed, it's drafted, then it's discussed on the Stellar dev mailing list- again, there's a link to that mailing list in the event description, so if you're interested, you should join it. Based on the feedback that comes in, a draft is then revised and re-evaluated. When that process is complete, it's put to the CAP committee, who decides whether or not to accept it. Now, after a CAP committee vote, the CAP enters into a one week Final Comment Period, which gives everyone a last chance to raise questions, also on the Stellar dev mailing list. And if the CAP makes it through that Final Comment Period, it's implemented in a major Stellar Core release. Then there's still one final step before it hits the network: validators vote on whether or not to upgrade the network to a new version of the protocol, + +[02:00] Thereby accepting or rejecting the change. So, ultimately, at the end of the process, the network decides on major protocol changes. Now, at the moment, there are two similar proposals on the table: [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) and [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md). Both proposals aim to improve overall network liquidity by introducing automated market makers at the protocol level. So there's general agreement that AMMs, which allow for the creation of liquidity pools, are a simple way to attract capital and enable high volumes of trading, and that introducing them to the protocol would have great benefits for the network, right: it leads to better liquidity, which means better cross currency payments. But while the proposals have the same fundamental goal- and actually a lot in common besides that- there are a few implementation differences between them. The big one has to do with order routing, right. [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) suggests interleaving AMMs with the built in orderbooks that make up Stellar's decentralized exchange, also known as the DEX. So in [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md), a buy or sell order can execute in part + +[03:00] Against an AMM and in part against the DEX- basically, pulls from each to piece together the best overall rate. So that's interleaved order routing. 
[CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), on the other hand, takes a best venue approach, right: an order fully executes against one or the other- either fully against an AMM, or fully against the DEX, whichever one provides the best overall rate. So that's best venue routing. There's already been a lot of discussion about both approaches, and it's mostly happened on the Stellar dev mailing list. Today we've arrived at a point where we kind of need to start figuring out our path forward. So, [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) has had more revisions of late than [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md), so it's a little bit further along. But let's imagine that [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) was brought to parity, so it dealt with the issues that [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) deals with, things like authorization: which approach would we choose, and why? That's where we're starting today, and I'm gonna open it up for discussion. And also, if I got any of that wrong, please say so, because I want to make sure that we're representing this + +[04:00] Correctly. So, who wants to start? Maybe I can start, given that I was the last to talk, I think, on that mailing list, so that people can take it from there. So, I think, yeah, like, we had a good exchange, I think, with Marcus on that mailing list. So, it's like- I think we reached a point, I think, in that thread, where we are basically thinking about, yeah, [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) as something that can be done on top of [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md). And I think, as a framework, I think where we ended up is really thinking about it as: okay, like, there are, like, some potential concerns there, but are those concerns + +[05:00] Strong enough that they would be basically deal breakers for doing just [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md)? So, like, the main concern I had in that thread was- and actually, as we've been moving along, this became true- [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) at this point is the longest CAP we ever wrote. It's actually quite complicated. It has many things in there, like- and I didn't actually anticipate that we had so much complexity. Like, up front, I thought the hard stuff was going to be around the arithmetic, like the fixed point math, you know, that we had experience with before with the DEX- and this turned out to be hard too. But, yeah, basically, many other things going on there. And, yeah, so where we are right now with + +[06:00] This thread is, it looks like there are no, like, deal breakers, right? Like- the things, for example, interleaving: okay, interleaving is better- like, I think nobody disagrees with that. But can we not have interleaving, so doing best venue? Is that something that, maybe, even though it's not optimal, is good enough to get this AMM solution out the door quicker? I think there was, like, potentially one other thing- this one we didn't even talk about, because this one is really, like, a very different, like, a big departure, basically, from how even existing AMMs in other systems, like Uniswap, for example, work: like, the dynamic fee topic that is in + +[07:00] [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md). This is another kind of one, in a way, like, in terms of complexity, that I don't think is necessary, like, to get a first version out. 
Both are interesting, but I think where we are right now is that those should be considered at a later point, given that they're not strictly necessary for a first version. I keep repeating myself, I'm sorry. So I feel like part of what you're saying is that [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), which is already fairly complex, is simpler than [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md), and that some of the things- the features, essentially- that CAP-37 suggests could be added later. Is that correct? That was always the approach. We were trying to make CAP-38 the minimal set of things we could do to get AMMs out. And then the question, the

[08:00] tension, has always been: is that something that's actually shippable, or are we going to run into something that makes it basically non-viable? So, for example, one of the concerns: are we going to cause the network to grind to a halt, let's say, because we have so much arbitrage, or the markets become so inefficient that things are just unusable? Those, I think, were raised on the mailing list, and I think at this point it's just: we don't know. And I would say I haven't heard a disagreement with [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) being on the path to [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md)- as in, we would basically have to implement CAP-38 before we implement CAP-37 anyway. So really the question is just: does it make sense to go all the way to 37, or can we release 38 in the meantime?

[09:00] And really, the only arguments I've seen for going all the way to 37 are the arbitrage thing that Nicolas just mentioned, and then also the fact that you essentially need a new UI to interact with CAP-38, but not really a new UI to interact with CAP-37- because basically CAP-38 really only supports path payments to take these liquidity pool offers, and path payments aren't really well supported in the ecosystem. Definitely not in things like StellarX. So really, the question here is: is the danger of arbitrage, and the burden of having people change their UIs, great enough to warrant Core going all the way to [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md)?
And, like I said on the mailing list, I

[10:00] think the arbitrage thing is better addressed in a different way, because we're going to have arbitrage even in a [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) world. And I think the UI thing is unfortunate, but it just seems way better to get this out earlier, and then we can proceed to work on some more cohesive UI later, if it seems worth it. So yeah, I don't know if anyone disagrees- does anyone actually disagree here? So I guess the question that I have is: I understand that the UI is something that can be solved later, and my question is about arbitrage. It seems like the risk with going with [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) is that it would open up new possibilities for arbitrage, and the argument that you made on the mailing list, Jed, is that it's better to deal with that as a separate

[11:00] issue. But is there a way to actually understand the risk that arbitrage poses? Is it a real risk? Is there a risk that implementing [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) would create so many arbitrage opportunities that it would have an effect on the network? Or do we feel like that's not actually something to worry about in the short term with CAP-38, and basically why? I can take that, Jed, if you want. So, I think there are two types of problems that come with arbitrage. One is actually having an inefficient market- but arbitrage is correcting that, so in a way, maybe it doesn't matter. It creates those weird changes in price on certain pairs, but at the end of the day, the AMMs end up being

[12:00] balanced correctly. So I think that's not so much of a problem. Maybe you have a larger spread for a little bit, things like that. I think the main concern came more from the amount of traffic that potentially gets generated by more arbitrage activity. So, for example, more recently we had a lot of volatility in the market, in crypto in general- what was it, maybe a month ago or something like that? It was crazy. And while this was happening, we actually had a lot of ledgers on the network with a lot of failed transactions, and I think that could be the type of

[13:00] thing that happens more often if we have this discrepancy between the DEX and AMMs. The reality is that, with the work that we did in the last year, the impact on the network is actually not that bad. The same question a year ago would have been different: when we had these types of arbitrage spikes, we were actually seeing certain ledgers being impacted- sometimes we had delays of 10 seconds, 15 seconds on ledgers- and that was not acceptable. So we found ways to fix that. But we had a good stress test, I guess, for a good month- I think in April, basically-

[14:00] and yeah, the network was fine.
The annoying thing is that downstream systems have to process all these failed transactions, and I think we have to be mindful of that. That's kind of the thing for me that is potentially annoying, but it's something that we can improve over time. For example, Horizon: right now, there's quite a bit of overhead, even for failed transactions. I think we can probably do quite a bit better, and we have similar ideas in Core. I think we have to deal with that regardless, because we already have some historical data with a bunch of failed transactions, and when people want to rebuild historical data- the meta, for example- based on those old ledgers, I think we want to be able to do that efficiently. So I think we will find ways to make this better overall. Yeah, to me,

[15:00] it seems like, if arbitrage goes up a lot, it'll certainly be a nuisance, but it doesn't seem like there's a way for it to really be catastrophic. I mean, there are lots of guardrails and checks in the network already that would prevent it from doing that. But yeah, things like catching up to the network or running Horizon could potentially be more load than we would want. So I think we should deal with arbitrage. Like I said on the mailing list, I'm not convinced that going to 38 will greatly increase arbitrage, unless it just greatly increases the number of people using the network- which is what we want, right? But it doesn't seem like something that we have to fix before we release 38, is my point. I think we can release it, and if arbitrage goes up, then we'll certainly address it. We need to address arbitrage regardless, so we should be working on that kind of in parallel. That's kind of my thoughts. I mean, just for the record- you know, I know this isn't on the table- my preference in all this: I

[16:00] think the real priority is to not let the perfect be the enemy of the good in this project. My ideal solution for an AMM would be as fast to implement and deploy as possible, and easy to upgrade or back out of when we decide we want to do something better. And so, from my point of view, both [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) and [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) are a little bit too linked with the orderbook. I mean, I've talked to some of you privately; in my ideal world, it would be a completely separate set of operations from the orderbook. It would even be a separate phase of transaction processing. So I'm not seriously putting this on the table, except to point out the fact that there are two ways to reduce arbitrage. One is to make the arbitrage expensive, because there's a fee, and the other is to make it not risk free. So if you did have a separate kind of transaction to take advantage of the AMM,

[17:00] and you couldn't take advantage of the orderbook in the same transaction, then there would at least be some risk that you would buy something and not be able to offload it the way you thought you were going to be able to offload it. So, well, I think that sort of where we're headed is something that's a bit more integrated, that works with legacy transactions.
You know, if we really are concerned about arbitrage, we should also consider the ability to make it not risk free- to not allow the two things to happen in the same transaction. Can you guys- can people see me, hear me okay? Yeah, it was just a weird silence.

[18:00] Yeah, okay, cool. Kind of as an extension of what David's saying- and I'm one of the people he mentioned he was talking to about this- there are all these kinds of trade-offs that we're making today: what's the simplest thing that we can implement that people will readily adopt? That's kind of the way that I've been looking at it. And so going one step simpler, as David was saying, and introducing new operations that completely decouple it from transactions- that even happen at a different phase, for example- that would be one step further. This would be, I think, an even bigger hindrance to adoption, even than what Jed was pointing out about how path payments aren't widely adopted in the ecosystem. But I think the bigger picture on doing anything like that is: imagine we were to do what David was proposing, or anything else

[19:00] considerably drastic like that. It would be such a disruptive change that, what would be the difference if we were to make a really disruptive change like that now, versus deploying something that we're ready to deploy now and making a really disruptive change like that in six months, or in a year, or whatever? Having something working in the short run wouldn't really change my calculus, at least, on making further changes like that. We'll have this new data, we'll have all this new information about what's good and what's not good, and then we can build a system that's potentially better in various ways- maybe ways that we don't even realize now- and there's not really anything stopping us from making those changes later. Yeah, and just in case it wasn't clear, I did end up being persuaded by this argument. And again, I think the fact that [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) is a potential retrofit onto [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) again speaks to the ability of CAP-38 to be a reasonable first step that's not

[20:00] overly limiting what we're gonna do in the future as well. So it sounds like people are generally saying: okay, [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) is a reasonable first step forward. It's simpler, we could add [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) to it later, and we could explore possibilities to deal with arbitrage in various different ways. But it feels like a reasonable first step, so that we can implement it, get moving, start to gather information, and do it in a sort of first-step way that can lead to other things. And it feels like that's what everyone's saying: start simple, implement something now, see what happens, and know that we can retrofit things onto it later. And I guess my question is: does anyone have a problem with that approach, or does that approach generally make sense?
+

[21:00] Because I guess then I just wonder if we're at the point where it's time to start to move [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) forward in the process. And what that would look like- again, just to remind people of the process- is that right now CAP-38 is a draft. There is a CAP committee, all of whom are on this call, who could vote to approve that draft, and if it was approved, we'd move it into Final Comment Period. People out there- anyone- would still have an opportunity to comment on it for one week, on the Stellar dev mailing list, and, depending on what happens in that week, we can move it into accepted and then start to work on an implementation, with an eye towards including it in the next protocol release. I guess I feel like we're at the point where it may be time to just vote on whether or not to

[22:00] accept [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md). Does anyone object to the idea of doing that? Is there any blocker? One question I had about [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) is: if things do turn out to be bad, how do we decide, in a principled way, that it's time to pull the plug or pause the AMM or something like that? I know we have a mechanism in the CAP to turn it off, but what would be the trigger for that? I guess- bad in what context here? Bad as in, we misjudged, you know, the arbitrage, or something like that? Or bad in the context of, oh, there's a bug, because this is complicated? Yeah, that's kind of what I'm getting at. Maybe there's a bug- that's one extreme of bad. And then there's sort of this in-between world, where we have a lot of arbitrage and Horizon is struggling to process all these failed transactions, but it's sort of okay- though maybe we should turn it off because it

[23:00] would be better. There's a possibility of a gray area where we'd have to think about that. Do we want to think about that now, or do we want to think about it then? I think we have the tools for that. Talking about this gray area is kind of funny, because the more unclear it is whether something is bad, the harder it's going to be to get people to vote for the change, right? So, yeah, I think it would have to be something more extreme- like, there's a security issue or whatever- that basically nobody's going to disagree with voting for a change to fix. Fixing it short term might mean turning it off, and then of course there would be a turnaround and, you know, a new protocol release. Just to be clear, right: if this situation unfolds- basically, if validators agree to the

[24:00] protocol change that includes [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), and then later it turns out that something is wrong- then a step to undo that, or turn it off, would be up to the network as well. The validators would also have to vote to accept whatever the solution was. That's correct, right? Yeah, and that's why the less it's a home run- something clearly good or clearly bad- the harder it's going to be to get actual adoption.
I think the only chance of, you know, pulling the plug, hitting the button, whatever you want to call it- getting that level of unanimity, as Nicolas was saying- would be in the event that there was an obvious way that people were going to lose money. If we were in that kind of a situation, I think it would not be hard to convince validators: okay, let's not let that happen. Short of that, personally, I have a hard time imagining that people would vote

[25:00] to turn it off, and it'd be more likely that we'd be in a situation where we're under some pressure to make some improvement and release a new version of the protocol in a shorter time than otherwise. It's also worth mentioning that we've got communication channels established with the validators, and time and time again the validators have proven themselves, you know, super responsive and responsible. So if something bad does happen, I believe we can get that effort together pretty fast to make a change. So, given all that, it seems like we're landing in a position where we say: okay, if something is clearly wrong, then we can work with the network, and we believe that the network is full of reasonable, responsive validators who can help get things back on the rails.

[26:00] Not that we're anticipating that to happen- we're really getting into the worst case, unhappy path scenario here. Yeah, I know, it's all about setting low expectations so that we're always happy with the outcome. No, but I think Eric's concern is actually the same as what people raised, right: if you have a lot of arbitrage, maybe fees go up more than they should. It's annoying- it's a nuisance- and in that case I think we'd have to figure out, in a more accelerated way, mitigations for arbitrage. Yeah, exactly. There are two concerns, right? One is fees going up, and the other one is that the number of failed transactions spikes in a way that is very large and starts to cause us performance issues. And from my perspective, the risk is the second one, and I want to get ahead of that. So I think my takeaway from this is that I should accelerate plans to deal with that

[27:00] possibility now. But there is some risk that this will happen in real time, right? But we have a few months, and we're working on this already, as you say. And the thing is, we're already processing today, in steady state, over 100 non-failed transactions per second, so it's already substantial. Yeah, I mean, as the baseline of successful transactions goes up, that increases everything for Horizon, as we discussed, Nico. But the number of failed transactions is potentially unbounded, right, in a ledger? No, it's not- it's bounded by whatever setting we have; right now it's a thousand per ledger. Yeah, they count as part of the ledger limit. So people would have to vote to increase that, which they could do today anyway. Also, let's not forget

[28:00] that there's an optimistic scenario here, which is that there's a lot of arbitrage but the AMM works really well. So the actual solution is: we deprecate the orderbook, and we just kind of convince the ecosystem that it's better not to place offers in the orderbook- an even happier path.
Perhaps people don't need to be convinced, because they like the AMM so much. Yeah, exactly, right. So, one clear thing: we know that this proposal would create AMMs alongside the orderbooks, but we don't know what that's going to do to the orderbooks. We just don't know yet. Yeah, John, I think this is one of those dynamic analysis types of things. It's very hard to predict. I mean, we're all considering this because we're engineers- we build stuff, and so we like to consider bad cases and make sure that we're not going to end up in really bad cases as a consequence. But perhaps this does increase the amount of

[29:00] arbitrage. I suspect it will, at least a bit- I think it'd be hard to convince someone that wouldn't be a consequence of what we're doing. But if it were to simultaneously reduce the amount of market making on the orderbook, because people are just populating AMMs instead, it's hard to know which of those two effects will be bigger. It wouldn't be weird to me at all, actually, if the total amount of traffic on the network actually went down as a consequence of this. Lower averages, higher spikes, is a totally plausible outcome. Great. Eric, did that address your concerns? Do you have more questions? That addresses my concern. I have one other question, following the doomsday trend of all engineers: what happens if this is wildly

[30:00] successful, and everybody wants to join Stellar and join the AMM and deposit pool shares and retrieve pool shares and everything else? Have we thought about that? I mean, it works for Uniswap. I mean, we've got a lot of room to run here, you know, between us and, let's say, Ethereum. But I think that would just be the outcome: fees go up, right? Yeah, all right. Well, or the ledger limits could go up too, right? There's still headroom that validators have to increase the capacity of ledgers, and we'd all be really happy, because this idea would have gained traction and everyone would love it. I mean, if AMMs are super popular, they require way fewer transactions. I think most of the transactions on the network now are people adjusting their orders on the orderbook, so that kind of all goes away. So we have even more room

[31:00] in an AMM world. So, any other questions, thoughts, doomsday scenarios that we want to play out? Are we ready for a vote? I feel like we're ready for a vote, and I kind of just want to do it now, live on this call. But before I do that, I'll just get real procedural and say: does anyone object to actually doing a live vote, and want time to think about this instead? In which case we can conduct the vote asynchronously over email. Yeah, cool, okay. So then I think we should have a vote here to move [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) into Final Comment Period, pending acceptance. Again, there'll be a week for people to raise questions on the Stellar dev mailing list- we'll post this new status change there- and at the end of that week, we'll see what comes in. Depending on what happens at the end of that week, we may move this into accepted.
[32:00] Also, if anyone ever wants to check out this whole procedure, you can look in the GitHub CAP repository. It lays out exactly the life cycle of a CAP, and you'll see that there are three voters, and it requires unanimous consent from all of them to move the CAP into accepted. That's Jed, David, and Nicolas, so let's start with Jed. Jed, what's your vote? Yes to accept. Okay. David, what's your vote? Yeah, I'd like to accept CAP-38. Great. Nicolas, what's your vote? No surprise- you know I'm a yes. Okay, cool. So we have three yeses, and that means that [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) moves into Final Comment Period, pending acceptance. I think that's it. Any other final comments, thoughts, questions that anyone has? Well, does the group want to provide some direction on [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md)?

[33:00] Like, should [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) now be adjusted to be layered on top of [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md)? Yeah, I think that makes sense. I'm not saying that we would implement this stuff immediately. Part of it is that we want to see how things play out- maybe everyone moves to AMMs, and we don't need to interleave these things, and we can just skip that complexity. But likely people will use both. So it would be good to have CAP-37 there in case we want to go down that road. That would be my take. So technically- or procedurally- that means [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) stays in draft mode and may be revisited in order to graft it onto [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) in the future. Yeah, I think there are some changes to [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) that would be worth making, because not all of the concerns in the CAP make sense in light of how CAP-38 is

[34:00] actually implemented. So there are some things written up there that would require revision. Ideally, some things get easier- the authorization logic, for example, can just piggyback on CAP-38- and then some things need to be rethought. There's also other feedback. For example, I think, to get closer to adoption, [CAP-37](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0037.md) should not use floating point. We try to avoid floating point because the arithmetic's not associative, which is kind of annoying- but again, that's an easy fix. Basically, I would encourage the CAP-37 revision to continue, and to try to have a deployment path- or a theoretical one- of how this would work, now that we're trying to move ahead with [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md).
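The floating point concern mentioned above is easy to demonstrate: IEEE 754 arithmetic is not associative, so validators summing the same values in a different order could disagree on the result- which is why protocol math sticks to fixed point integers. A quick illustration in TypeScript:

```typescript
// IEEE 754 doubles: the same three values, summed with a different
// grouping, give different results.
const grouped1 = (0.1 + 0.2) + 0.3; // 0.6000000000000001
const grouped2 = 0.1 + (0.2 + 0.3); // 0.6
console.log(grouped1 === grouped2); // false
```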
[35:00] Great, sounds like a plan. Anything else? Okay. Well, thanks everyone for coming, thanks for participating, and thanks to everyone who's watching. Again, we encourage you to participate as much as you want in any of these discussions about the future of the Stellar protocol. There are links to these particular CAPs in the event description, so you can go read them to actually see what it is we're talking about. And there's also a link to the Stellar dev mailing list, which is where we do things like notify people about changes of status of CAPs, and it's also where all the discussion about the CAPs happens before these meetings. So again, it's open participation- we'd love to hear your thoughts- and we will now move [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) into Final Comment Period, pending approval. All right, and that's it. Thanks everybody.

+
diff --git a/meetings/2021-07-28.mdx b/meetings/2021-07-28.mdx new file mode 100644 index 0000000000..43c7d1746d --- /dev/null +++ b/meetings/2021-07-28.mdx @@ -0,0 +1,155 @@ +--- +title: "Stellar Ecosystem Panel - Fintech in Africa" +description: "This panel explores how fintech builders across Africa are using Stellar to address cross-border payments, regulation, and interoperability challenges while scaling inclusive financial services." +authors: + - joseph-benson + - justin-rice + - rick-groothuizen + - tori-samples + - wiza-jalakasi +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This ecosystem panel brings together fintech founders, investors, and operators building across Africa to discuss real-world challenges and opportunities in delivering digital financial services. Panelists share how they identified underserved user bases, why they chose blockchain infrastructure, and how Stellar enables faster, lower-cost cross-border value movement. + +The conversation focuses on practical lessons from operating in fragmented regulatory environments, scaling under uncertainty, and designing products that work for both end users and financial partners. Speakers also highlight why interoperability and collaboration will be critical to the next phase of fintech growth on the continent. + +### Key Topics + +- How African fintechs identify niche user bases (SMEs, refugees, informal merchants, cross-border traders) +- Using Stellar and stablecoins for efficient cross-border settlement and liquidity management +- Regulatory realities: compliance, KYC/AML, and engaging regulators early +- Building resilience amid sudden regulatory or partner disruptions +- Advice for founders: focus on core value, leverage existing infrastructure, and know your details +- Interoperability challenges across wallets, banks, telcos, and countries +- Why open standards and integrations matter for Africa’s fragmented markets + +### Resources + +- [Stellar Development Foundation Events](https://stellar.org/events) +- [DFS Lab](https://dfslab.net) + +
+ Video Transcript

[02:00] Hello everyone, and welcome to today's ecosystem panel on fintech in Africa. I'm Justin, VP of ecosystem here at the Stellar Development Foundation, and today I'm joined by four esteemed colleagues- Wiza from Chipper Cash, Joseph from

[03:00] DFS Lab, Rick from ClickPesa, and Tori from Leaf Global- who are all here to discuss the fintech landscape in Africa. So they're going to talk about the challenges and the opportunities they see on the continent, and in the world in general. They're going to tell us about their companies and their personal journeys, and hopefully they'll even share some advice for budding entrepreneurs. So it's gonna be a fairly freewheeling discussion, but we will do our best to incorporate audience questions when possible. You'll see that there's a Q&A button on your screen; feel free to use it at any time to submit a question. Before we kick off the discussion, though, we will take a few minutes to allow each panelist to introduce themselves and to give a little context about who they are and what they're up to. And I'll start. As I mentioned, I'm Justin from the Stellar Development Foundation, which is a non-profit organization that seeks to support the growth and development of the Stellar network in order to increase equitable access to the world's financial infrastructure. At SDF, we believe Stellar has the potential to have a significant impact in the world, and we've also seen a ton

[04:00] of interest in Stellar in Africa, which is part of why we're hosting this ecosystem panel. Our goal here is to explore the experiences of aspiring companies that are delivering digital financial services across the continent, but we're also seeking to encourage those companies. So, in addition to this panel, we're also launching the Stellar blockchain bootcamp with DFS Lab in September. We'll talk more about that at the end of the hour, so stay tuned. Now, I've mentioned Stellar a few times; to make sure everyone knows what I'm talking about, I'm going to use the rest of my intro time to give a quick overview. What is Stellar? At a high level, Stellar is a decentralized, open network that connects the world's financial infrastructure.
It is a global public blockchain built for interoperability and to further financial access and inclusion, and it does that by making money more fluid, thanks to its speed and scale and its ability to represent and transfer value. It also makes markets more open, because when financial infrastructure is connected, you have greater geographical and affordable access to markets. And ultimately, it empowers people,

[05:00] because it gives them real ownership of the value in their possession. So Stellar is a platform that allows businesses and developers all over the world to build solutions to real world problems, and many of the businesses represented on this panel do just that: they build on Stellar. So, enough about Stellar. I will move on and let each of our panelists introduce themselves, starting with Wiza from Chipper Cash. Over to you. Thank you, Justin. Hey everyone, my name is Wiza Jalakasi. I am a Malawian living between Nairobi and Johannesburg, and I am the vice president of global developer relations at Chipper Cash. Chipper Cash is a mobile wallet- the largest smartphone-only mobile money service operating on the continent- with coverage across eight different African geographies as well as the UK and the United States, and we enable about 4 million users, across all of our jurisdictions, to send each

[06:00] other almost a billion dollars each and every year. We've got basic peer-to-peer functionality, both local and international, depending on the jurisdiction that you're in. We also enable users to get virtual and physical prepaid debit cards tied to their Chipper wallet; to purchase some crypto assets- namely Bitcoin, Ethereum, and USDC, with a few more in the pipeline as we make our crypto trading platform a bit more robust; fractional investing in U.S. stocks; airtime and data bundles across the board; as well as various kinds of bill payments for their respective jurisdictions. We're a young company, three years old, but we've managed to grow pretty quickly over the last year and a half- right now we're at 160 people. We're building on Stellar's solutions to explore the viability of cross-border settlement with African stablecoins. A lot of the cross-border trade

[07:00] that takes place in Africa is intermediated, somewhat unnecessarily, by the U.S. dollar, and we think that we can take advantage of these stablecoins to facilitate intra-African trade in a much more efficient and cost effective way. Fantastic, thank you. Next up, we have Joseph from DFS Lab. Joseph? Yeah, hi, thanks, Justin. Hi everyone, I'm Joseph, head of entrepreneur support at DFS Lab. DFS Lab is an early stage investor in companies building for digital commerce. We typically invest at pre-seed and seed stage, anywhere from twenty five thousand dollars to fifty thousand dollars, and we work with founders to help them understand their markets, help them grow their users, and also help them raise bigger rounds in the future. Over the last five years, we've invested in companies like Ochova, Pula,

[08:00] OnePipe, and a bunch of other companies. We also run what is basically an angel club, which allows seasoned and new investors to invest anywhere from 2,500 dollars upwards in companies we're also invested in. So, yeah, thank you. Great. Next up is Rick from ClickPesa. Rick? Thank you, Justin. Hi everyone, my name is Rick. I'm the co-founder and director of ClickPesa. ClickPesa is a company that has been operating in East Africa since 2018.
We provide a toolbox of digital tools- such as invoicing, CRM, and e-commerce solutions, combined with payments- to SMEs and freelancers in East Africa. ClickPesa is active in the markets of Kenya, Tanzania, and Rwanda,

[09:00] helping local businesses perform better to increase productivity and profitability. Our mission is to simplify doing business for companies in or with East Africa. We've been building on Stellar since 2018, and last year we became an anchor on the Stellar network. ClickPesa is a small company, around 10 people, most of us operating from Tanzania, and we're excited to talk further about Stellar in this panel. Looking forward to it. Thank you. Fantastic, thank you. And finally, from Leaf Global, we have Tori. Take it away. Thanks, Justin. Hi everyone, really excited to be joining the Stellar community again today. You all have been such a source of support over the years, so happy to be with you. My name is Tori Samples, and I'm the co-founder and CTO of Leaf Global Fintech. I am based in

[10:00] Kigali, Rwanda, and Leaf currently operates in Rwanda, Uganda, and Kenya, with goals to expand across the continent and eventually into other markets, such as Latin America, the Middle East, and Europe as well. We are a digital wallet that allows people to safely store and transport their money across borders. Different from other digital wallets, though, we really operate down market. We started with refugees and have expanded into cross-border traders and informal merchants- people that don't have access to the formal financial system. They don't have bank accounts; they probably have a mobile money account if they're in East Africa, but that's fairly limited, in that it's very expensive and difficult to cross networks. And so Leaf allows them to store their money over time in different currencies, to send and receive peer-to-peer with friends and family in other countries for free, and to cash in and cash out from any mobile money number in the countries in which we operate. We've also started adding things like airtime purchase and value-add services into the wallet, and the best part

[11:00] is that it's accessible over basic phones, so you don't even need a smartphone to use Leaf. We use a technology called USSD that brings the powerful Stellar-based backend- all built on blockchain- into this really low tech environment, where people don't need to know that there's a blockchain behind the scenes, they don't need to understand crypto to use our stablecoins, and they don't need to have a smartphone or a data or internet connection in order to use our services. Fantastic, thanks, and thanks to all four of you for joining. So at this point, we're just going to move into a discussion- and again, anyone who's watching, feel free to submit questions via Q&A; we will incorporate them into the discussion when possible. Generally, we're here to talk about the fintech landscape in Africa, to talk about these solutions, and to talk about observations that the people on this panel have about what's going on in the world. And I kind of want to start broad, and the

[12:00] question that I actually want to start with is: each of you has a product that incorporates blockchain, that targets a specific sort of user base.
How did you identify that user base, and how did you decide that blockchain was a good solution to offer them? I know that this will become a more freewheeling conversation, but to start with, I will just call on Tori, since she was the last to speak. Sure- and we might be the most niche market play of everyone represented today, so happy to start. I mentioned that we started with refugees, and this became apparent to my co-founder Nat Robinson and me as we were really digging into the refugee crisis, before looking at the technology side. We identified that there was this gap in the market, where up to 80% of the cross-border market in Africa is still cash based. It's informal- people are crossing the border with cash in hand, which can be very dangerous, but it's also just expensive over time. It's inconvenient, and it doesn't allow people

[13:00] to move into the formal economy over time. So we saw a significant gap there, and in looking at ways to formalize that, and to do it with a sustainable business, we saw blockchain as a really good option. This was early 2018, so the landscape was a little bit different, but we knew right off the bat that Bitcoin and Ethereum were not going to be a good fit for what we needed, because we knew that we would be dealing with small transaction sizes and a customer base that didn't have 10 minutes to an hour to wait to know the status of their transaction. And so, as we started digging into different solutions, we found Stellar through a contact of ours, actually in Mexico, and we found that it was a really good technical fit. We did look at other blockchains for sure, and eventually found our way back to Stellar and started building out the wallet. During that time, it was interesting to see how the regulatory landscape changed, as well as the attitudes towards blockchain. So, if anything, I think over the last few years

[14:00] it's become an even better fit for us, because Stellar makes it so much easier to work with regulators and central banks, thanks to the stablecoin approach that we're able to take, using our own custom assets and acting as an anchor by proxy for our wallet customer base. We also work with licensed entities to hold and move funds, so that makes it easier as well. But overall, I think the approach was: identify the problem, identify who it touches and their unique needs, and then find a technical solution to fit that need. Great. Does anyone else have- I mean, there's a lot of interesting stuff there, but before we move on to talk about regulatory questions and things like that, does anyone else have a perspective on how they selected their user base, or how they decided to explore blockchain as a solution for problems their users face? How about- so, let's move in

[15:00] there. So, on our side, we don't actually expose a lot of the blockchain functionality to our users, except for the assets that we allow them to buy and sell and send and receive in the app- and we're adding a bunch of Stellar assets there very soon- but it's more our operational backend, right? So, in a good month, we'll be moving anywhere between 80 to maybe 150 million dollars a month across African currency pairs. It is very expensive to try and do a conversion to US dollars and then convert US dollars back into whatever destination currency you actually need. So I can give you an example: one of our most popular corridors is Uganda and Nigeria, both ways, right? One might not typically imagine that there's quite a bit of cross-border trade that takes place between those two markets, but our data seems to suggest otherwise. So we can find ourselves in a scenario where today we have way too
So like I can give you an example: one of our most popular corridors is Uganda and Nigeria, both ways right. One might not typically imagine that there's quite a bit of cross border trade that takes place between those two markets, but our data seems to suggest otherwise. So we can find ourselves in a Scenario where today we have way too + +[16:00] Much naira in Nigeria and the other day we have got way too much ugandan shillings in Uganda. How do we quickly move money around across these currency pairs such that we are not unnecessarily losing money by doing unnecessary conversions to the us dollar at typically unfavorable rates. So you'll find that, like most forex bureaus, bureau de challenge banks will not have like an exchange rate for the nigerian naira to the ugandan shilling. But if you look at stable coin pairs, you can find either a direct the conversion rate between the two currency pairs or a currency pair that's like a lot more cost efficient than translating to the us dollar, typically something like USDC. So it started making a lot of sense to us very er early after encountering this problem that stablecoins might be the way to go here. The biggest challenge, of course, has been around getting our partners- and we work mostly With banks in many markets- to be + +[17:00] Able to move money around. Getting our bank partners to have a high degree of comfort around stablecoins and how those stablecoins are backed has been a bit of a challenge, though we've been able to make a lot of headway and I think the future is promising what? What's most encouraging for me about it is that once the regulators and other stakeholders start to understand the cost benefits of using this approach and once they see some examples at scale of other countries and other jurisdictions where there's been a common sense regulatory framework that has worked and has not collapsed or created some degree of chaos, it's very easy for other countries in the region to adopt and copy that sort of model. And I think it will be a much more different landscape in the next three to five years across the Continent. Great, I mean. Yeah, for us + +[18:00] It's been quite. We also don't really showcase it to the user, as in that, since it's a similar situation and similar reasons as Chipper Cash and I believe I wanted to add there that one of the reasons we chose the Stellar blockchain- is also the. I mean obviously, the division of the Stellar Development Foundation, which we felt very aligned with, but also a lot of action, which is also quite important. So I think SDF has been over the last couple of years, put a lot of effort behind building a good ecosystem. So, whether it be anchors, wallets, Traders, more and more parties are coming + +[19:00] To the ecosystems and the operations that the Stellar blockchain offers, therefore, are getting quicker and more efficient, and it also makes it cheaper, and that all in a decentralized way. One example is, for example, adding of the USDC Stellar asset, which was added earlier this year. These are all great developments that make the ecosystem much better, which benefits everyone that interacts with the ecosystem and, yeah, that's only getting better. So that has also been one of the reasons for us that we went for the Stellar blockchain- Great- and we're getting some good questions in the Q&A too, and one of them sort of touches on something that's come up a few times, which is basically regulations. 
Great. I mean, yeah, for us

[18:00] it's been quite similar- we also don't really showcase it to the user, since it's a similar situation and similar reasons as Chipper Cash. And I wanted to add that one of the reasons we chose the Stellar blockchain is the vision of the Stellar Development Foundation, which we felt very aligned with, but also a lot of action, which is also quite important. SDF has, over the last couple of years, put a lot of effort behind building a good ecosystem. So, whether it be anchors, wallets,

[19:00] traders- more and more parties are coming to the ecosystem, and the operations that the Stellar blockchain offers are therefore getting quicker and more efficient, and it also makes things cheaper- and all that in a decentralized way. One example is the addition of the USDC Stellar asset, which was added earlier this year. These are all great developments that make the ecosystem much better, which benefits everyone that interacts with it, and that's only getting better. So that has also been one of the reasons we went for the Stellar blockchain. Great. And we're getting some good questions in the Q&A too, and one of them touches on something that's come up a few times, which is basically regulations. The question from Peter is: how have the participants found complying with the travel rule / wire transfer regulations, and demonstrating this

[20:00] to regulators and banking partners? So, a question about the travel rule and wire transfer regulations: are there challenges in complying with regulations, and is that one of the big challenges that you face? I guess, Rick, maybe you can answer that one, since you're on the spot still. Sure, yeah. In the countries where we operate, cryptocurrency regulation is all very young or non-existent, so we've taken quite a pragmatic, trial and error approach to it. For our use case, we are an anchor on the network, issuing tokens which people can buy or sell, and that fitted, for us, within existing

[21:00] regulation. And, yeah, to be able to buy and sell, you need a bank partner, so we also regularly sit with them and see how to adjust how we approach regulation, or compliance, or AML around this. Does anyone else want to speak about regulatory challenges, or regulation in general? Tori, looks like you're ready to go. Sure, yeah- and only because we get to have this conversation like five times a day with everyone, right? It's the first thing that anyone asks about when they hear what we're doing, especially if we mention refugees, just because there's a stereotype there. But we actually put all of our customers through stringent KYC and AML

[22:00] checks at onboarding, and then they're able to do transactions within the system- but of course, there are limits on that. So I think one of the advantages of building in Africa is that, widely, there has been some regulatory treatment of e-money in the last 10 to 15 years, starting with M-Pesa in Kenya and expanding into mobile money and mobile banking across the continent. And so when we approach regulators to say: this is what we're doing, this is the approach that we're taking, this is a digital representation of value that is stored and held with licensed financial institutions in fiat behind the scenes- that sounds familiar to them, because it fits the framework that they have for mobile money, for example, in East Africa. So I think we've found a lot of success using that approach, and it's definitely part of our growth strategy to identify areas where the regulatory treatment of Leaf is going to be favorable.

[23:00] That's, I think, changing globally, and it's probably going to be a while before every country has explicit regulation on blockchain or crypto based digital wallets. But I think in the next five years there probably will be some sort of framework for e-value generally. And so, again, the way that we do that is very important for our long term success. It's not just that there's a framework here and anyone can succeed- I mean, we've put a lot of thought and intention into the design to make it work for the existing frameworks, the potential for change over time, and working within the maybe informal political relationships of the existing landscape. Because, at the end of the day, it might not matter only what the law says.
It matters who's already in the space, what they're doing, and how they're being treated. And so making sure that we can play nicely with licensed commercial banks, telcos, PSPs, payment providers or payment processors-

[24:00] that's all very important to us. Our solution is very much meant to play nicely, so that we can get traction with existing financial institutions and regulators, because if it only works at the tech layer and it doesn't work in the real world, then it doesn't work at all. So I think that's been our approach in all of that, and it seems to be working well so far, at least in East Africa. Yeah, so just to follow up on, you know, Tori and Rick: I am seeing- well, I'd say- kind of an opening up, right, of regulators. So in Nigeria, where I live, for example, there was the brash kind of ban on just general crypto

[25:00] activities, almost to the extent of making it, in a way, criminal. But then earlier this month, we've seen the CBN, the central bank, say they're working on a central bank digital currency, which, to me, is kind of positive. It seems like there's some engagement with stakeholders, and they're beginning to understand how this works, and I expect that we'll continue to see that across multiple governments across the continent. It might not be as easy and as smooth as we would expect it to be, especially in countries where you don't have the framework of mobile money or e-money, for context. But I believe that, with the work being done by lots of the companies, like Chipper Cash and Leaf Global, we'll probably see more friendliness from regulators over the coming years. Wiza, any insights on regulatory compliance

[26:00] and the challenges involved? Hopes, plans, dreams? Yeah, I guess there are two parts I'd want to think about. The first is in reference to the travel rule. We've been fortunate enough that a lot of our transactions fall under the FinCEN threshold limit- which was previously, I think, three thousand dollars, and is now being proposed to be as low as 250- so a lot of our transactions are typically below that amount, but higher in frequency. So you don't have to be as stringent when following that, though we do have the infrastructure in place to be able to pass on all of the relevant parameters regarding the transaction- the address, the name, the transaction amount, the stated purpose, etc. We can pass that programmatically, as parameters, to our payment partners

[27:00] when they're processing, so they have a record of all of this information as and when the transactions are taking place. So it's actually not that hard in terms of being compliant. What is challenging is that the regulatory frameworks are highly fragmented across markets, so you need to set up a separate compliance structure in each and every country. Of the 160 people working at Chipper Cash today, about 60 of them are on the legal, risk, and compliance team- and this is a very software heavy service; we've got about 40 engineers.
So you can imagine how seriously we take that. And we found that engaging the regulator early helps. A lot of the time, we have this perspective that the regulator is not pro innovation, that they want things to stay a certain way, or that they don't understand- I think that's not the right attitude to approach conversations with the regulator. They are people, just as much as you and me, sitting in the offices of all of these central banks, but their incentives are

[28:00] very different from ours, and the most useful approach has been demonstrating technical mechanisms which satisfy the requirements and the interests that the regulators have. We've been able to set up this cross-border wallet in South Africa- one of the most strictly regulated financial services markets in the world- by going to them and saying: okay, we want to do this thing, we know that no one has done it quite like this before, but how do we make it happen? As opposed to: the law says we can't make it happen. So I think, by taking that fragmented, country-by-country approach and investing in those relationships early, you'll be able to set up and scale quite quickly in some of these markets. There's another question, asked by Afolabi, in regards to crypto: for crypto related companies like Chipper Cash that allow users to use BTC in the app, how do you comply with local regulations? So it's about understanding what the state of the regulation is

[29:00] in that market, and adapting as and when things change. Before the Central Bank of Nigeria directive, we allowed all of our users in Nigeria to transact. We've since disabled that functionality for those Nigerian users, in order to comply with the new regulation. In markets like Uganda and South Africa, where you don't have an explicit prohibition of crypto, we're able to run the services, and we're actually trying to help formulate some common sense frameworks that enable the regulator to regulate crypto effectively- even in markets like Nigeria. It's important to note what the prohibition was: it is not illegal to own or hold cryptocurrencies, but the central bank did instruct centralized exchanges not to connect to the centralized banking systems. So there have been very interesting ways that people have gone around that, like setting up digital and physical agent networks, and we're exploring some of that in some of our markets to see whether it might be a viable alternative. Yeah, it's

[30:00] interesting. One thing that I'm thinking about as you're telling that story is that you have to be able to adapt to sudden change, right? You don't necessarily know what's coming next, there's a lot of uncertainty, and as new information comes in, you have to figure out where to steer, and you have to be pretty nimble. So I guess one thing I'm curious about from all of you is: what is the most unexpected thing that's happened as you've been building your company, and how did you respond? Tori, do you want to go ahead? I saw you come off mute. No, go ahead, go for it. Thanks, yeah.
So there have been, like, several scenarios where- especially in our earlier days, when we were operating in certain markets and we didn't have very much of a compliance structure- you know, we'll be having the time of our lives- all-time highs, incredible volumes- and then your partner processor just calls you and says: + +[31:00] we've received a cease and desist letter; shut down your services immediately. And it's, like, at 2 am, right, and there's no explanation, there's no negotiation. It's like, yeah, you can come and talk about it next week, but right now we're ceasing and desisting this, because we don't understand exactly what you're doing. And this has happened to us quite a lot, especially in the year 2019. It's really been a function of not really having the right relationships and the right structures in place to manage these adverse events as and when they occur. Something that is a bit counterintuitive- it's a bit controversial, but I'll still say it anyway- it's my personal belief that a lot of the people in power- sometimes the people in positions of power- might not be there because they, like, meritocratically earned that position, right? They're not, like, the absolute smartest people to be sitting in that role, due to some other factors. Maybe they're, like, the president's cousin or whatever else, and they're sitting in some, you know, really important position in a central bank + +[32:00] or something. Now, that individual does not necessarily have the exposure and acumen to be able to assess the viability of your business purely on merit, right? And that is, like, a fundamental reality of the structures and institutions that we have in Africa today. A lot of people seem not to pay attention to the fact that African republics, as an idea, are about 50 to 60 years old, and you're applying frameworks and regulatory structures from countries that have had institutions for hundreds, if not thousands, of years. It doesn't really work the same. So in a scenario like that, in that type of environment, it is really important that you know the right people in every country, such that when you get flagged for doing something, those people are able to make a case on your behalf- to help the other people who might be in positions of power to understand that you're not actually trying to break the law. You're trying to introduce a new level of innovation which is compliant, which is actually helping some of the government and the state actors to achieve their intended targets. And that is a lot easier said than done, but I think, you know, investing + +[33:00] in relationships and making sure that you're always communicating preemptively has been very useful in helping us maintain this type of business across the continent. I'm gonna jump on that, because that's our experience- at least the first part- as well. When somebody, a partner, just calls and says: we noticed this growth on your system, and it's become big enough that it's been flagged by our partners, and now we're gonna have to put a hold on everything until we understand exactly what's happening. And the reaction, of course, on our side is: well, we've been through these conversations. This is not a surprise. It's great that we're growing that fast- that we, you know, tripped a flag in your entire system- but that shouldn't be the conversation we're having today.
And so I think that for us it's been a matter of intentional duplication within the system, to make sure that when that happens with one partner, we can very quickly adapt and shift to another. And so making sure that we are truly an + +[34:00] integrations hub, with lots of options to keep our operations going and to provide flexibility for our customers, is really important. And then the other thing that I would add: I think the relationships point is spot on. Those always help, no matter where you are. But also, I think there's this gap where, again, it doesn't only matter what the law says or what the regulations say- it matters who is looking at those and how they're interpreting them. And so, before we go into any market, we spend a lot of time looking at the regulatory framework and what exactly we can and can't do, and we make sure to comply with that, because we want to be able to serve customers, we want to be able to operate, and we want to stay within the law. But if you get somebody who is not in a position where that's part of their day-to-day job, and they don't understand exactly what a blockchain is- perhaps behind the scenes, that's always + +[35:00] a big buzzword, you know; it can set people's risk factors off- and if that person is the one who is tasked with deciding somebody's fate, they usually are not the ones who want to stick their necks out on the line. And so making sure that we understand exactly what the law says, and that we're able to communicate that effectively to people who probably should know it but really don't, has been a big challenge for us. I think we're getting better at it, but we still come across it all the time. Yeah, it's interesting. I mean- well, Joseph or Rick, do you have anything to sort of add on this topic? Anything about the biggest surprises, how you've dealt with them? Now, for us it's been similar examples, similar fears of shutting down an account. And I think, especially for fintechs, especially in Africa, it's quite important, when + +[36:00] the product is evolving, that you keep flexibility in your product, because, yeah, those things are unfortunately there, and if you can't respond quick enough, that means that it can kill your business for a while. So flexibility is definitely something you need to have as a fintech. Yeah, definitely. So, in an earlier part of my life I was the co-founder of a fintech, and these experiences are very similar, where you have partners just basically- in our case, you know, our customers just couldn't do a transaction anymore, a certain kind of transaction anymore. And then, you know, we reach out to our partners, and it's: oh, we cut you off, because, you + +[37:00] know, the central bank reached out. And so, yeah, it's being able to, you know, have multiple partners and, like Wiza said, constantly building relationships with both regulators and your partners, such that, you know, it's easy to overcome times like this. But yeah, generally they'll just always come up. You almost can't avoid them. Yeah, I guess this is also sort of a general question.
Right, so far we've sort of been talking about the solutions that you built and the customers that you serve, but I think there are also people in the audience who are sort of budding entrepreneurs, who are interested in hearing about your experiences actually building a fintech. And, you know, being able to sort of respond to unexpected circumstances, to be flexible- that seems like part of it. But I guess I'm also interested in any advice that you would have for someone who's interested in starting a fintech company, or who's interested in scaling one that they've started. Like, what's + +[38:00] like, sort of the- is there a number one thing that you would suggest keeping in mind if you're trying to build your own business? Yeah, I can mention a thing. It doesn't only apply to fintech businesses, but to starting businesses in general: don't try to be everything and do everything. And I think in fintech it's more important, because you need to tick off quite some boxes, like regulation- you need to have a partner if you're doing anything with financial activity, often. Yeah, you need various elements in your business to work together, and I think + +[39:00] what is important there is to really look: okay, what is your core business? Where are you getting in as a starting fintech business? And try to outsource as much as possible. So, yeah, don't try to build your own KYC and AML solutions- there are multiple players in the market- and the same for those other activities as well. Try to focus on your core and, yeah, utilize existing solutions in the market. Of course, that's a little bit different when you grow, but I think it's very important to do that in the beginning. I think mine would be similar to that, along the lines of the core business theme. I would just say, for someone starting a venture- fintech or otherwise, but especially fintech: know the details. Because there's so much that technology can + +[40:00] do these days that we often forget why we're doing things, or what we need to really be successful. We see success stories in the news every day, and it seems like you could throw any tech solution at the wall and something would stick. But I think that, especially for fintech- being in a highly regulated space, especially building in markets that might not have as robust of an environment, whether that's Africa or somewhere else- it really does come down to knowing the details: of your customers and their UX/UI preferences and their needs on a day-to-day basis and their comfort level with technology; knowing the regulatory landscape; knowing your partners' comfort level with risk and any stipulations that they might have to adhere to or that they might be under. So I think that the details are where it can fall apart or succeed greatly, but knowing them enables you to set up a + +[41:00] business that has a much higher chance of success, and then to be able to effectively defend those decisions over time. Because if you're building in the fintech landscape, you're going to have to defend every single one of the thousands of decisions that you make on a day-to-day basis- to potential investors and partners and regulators and customers and anyone else who touches your ecosystem. So the more you can know and be able to defend, the better. Joseph or Wiza, you want to touch this one? Well, oh, very, you know, very similar.
And, like Rick said, it's beyond just fintechs, right? It's basically knowing your, you know, your product- knowing what you're trying to build. I think one of the best ways to + +[42:00] put it would be: build the product first, before the business. Right, it's a bit tricky in a fintech, especially with regulation and stuff, but I believe that you still have some room to ask for forgiveness, you know, rather than beg for permission, right? And so, as much as you can, I'll say: just make sure that you have a solid understanding, like Tori said, of the product you're trying to build, of your users, of what they want, of what is really important to them. And, in the same vein- almost at the same time- work on building very strong relationships with, you know, partners. Your relationship with partners will probably, in many cases, make or break you. It's very important that you make sure that those relationships are strong, and, in many cases, they'll protect you, especially if you're, like, you know, a fintech + +[43:00] starting out. Most of your partners are probably going to be the ones to protect you from, you know, heavy hits from regulators, right? So make sure those relationships are really tight. And I think Wiza referenced, you know, kind of finding out what their motivations are and then working with those- beyond just regulators, your partners too. Know what your partners' motivations are, and make sure that you're working with them to achieve whatever they also want to achieve while you're achieving what you want to achieve. So basically, yeah. Anything to add? No, I think, between what Tori and Joseph have shared, they have captured all of my ideas. Great. I mean, one thing I think about as you're discussing all this is, I think, + +[44:00] okay, the Stellar network is a great back-end platform for sort of storing and transferring value. And ideally, in my mind, sitting here at the Stellar Development Foundation, I imagine that all these different organizations, these ecosystem participants, would build on the network and be able to, like, interoperate, right? To be able to work with one another, to forge partnerships, to make payments across borders and across currencies. But I also realize that's, you know, my utopian vision of how Stellar should work. And I'm curious about your views on interoperating with other people in the Stellar ecosystem, or in sort of the fintech ecosystem in general. How important is it that the solutions you're building play well with others? Do they currently? And, whether they do or not, what are the biggest challenges to interoperation? I have a bunch of thoughts on this + +[45:00] one. So I think interoperability is, like, the next big barrier for digital financial services across the continent. We have seen relatively high penetration, especially in urban centers, but, like, you know, we still have a long way to go in digitizing finance, right? Network International, who operates probably the largest point-of-sale terminal network for banks across the continent- their latest data from last year indicates that 93% of transactions across the continent- retail transactions- are still in cash.
So all of this fintech we're doing is lovely, and the volumes are impressive, but, like, we have a long way to go. That's one. Now, another side of that bucket is that different countries have got fragmented solutions on a per-country basis. So even in individual markets, native interoperability is not yet where it needs to be. There are a few notable examples: Tanzania has got really good interop; Uganda, so-so; and I'm not sure about the + +[46:00] latest in Rwanda- maybe Tori can chime in there- but there's still quite a lot of work to be done on a national level. And I'm pleased to see that over the last three years, a lot of the bank and mobile money interoperability problems in East Africa and southern Africa have been solved quite well. But once we have that national interoperability- all of the digital financial systems interoperable- the next question is around international transaction interoperability. And it was, I think, actually this week that Afreximbank announced it is piloting a continental payment system that is going to be used to facilitate trade of intra-African currency pairs, like the ones that I described earlier. This is consistent with the African Continental Free Trade Area's policy towards unifying Africa, towards a single market. So I do think that, like, you know, interoperability is probably the most important thing we have to do at this stage. It + +[47:00] doesn't matter if, you know, my money is sitting in Chipper and Tori has hers in Leaf- why can't I send my money from my Chipper wallet to Tori's Leaf wallet? We both support the Rwandan franc. We both support the Kenyan shilling. We have a long way to go. Some of the standards that are being developed, such as Mojaloop, could be something that's quite exciting, but again, they're targeted towards specific verticals- that is, the telco industry- whereas we're over-the-top financial service players. If we don't do interoperability, we'll end up with a scenario where you've got many monopolies in various regions, which is almost the status quo to date. So definitely something that needs to happen. I think that blockchains are an obvious solution towards interoperability. The technology exists today, but I think the biggest barrier is that fintechs in Africa don't have, like, a unified association the way the telcos do. So Mojaloop is being pushed to the telcos through the GSMA, which is the industry association. The first thing we need + +[48:00] to do is unionize and bring out an association of over-the-top financial service players, and then agree on what sort of standards we're going to use for interoperability. I don't think it's been as much of an issue in other markets, because Europe has got the eurozone- so if the currency is natively interoperable, you don't have to worry- the United States is a single market, and China is also a single market. But we compare ourselves with these regions, and meanwhile we are 54 very distinct and very unique countries with unique needs, challenges and opportunities. So definitely, I think the next unicorns are going to be interoperability platforms out of Africa, and it's a fantastic opportunity for those who are willing to give solving it a shot. I completely agree with that. I think the fragmented markets that you mentioned are the biggest opportunities as well.
So if the telcos are not going to solve that- you know, in Rwanda today it's very difficult to send between MTN and Airtel- if + +[49:00] they're not going to solve it for themselves, because of the competition between them, then that's an incredible opportunity for somebody like Leaf or Chipper or other fintech plays to come in and solve it for them. So I've been excited to see that there are definitely companies stepping into those gaps. I think they've started at the business level- the B2B plays, that's been a lot easier- and then hopefully the benefits of interoperability and bridge plays will come down to the consumer level over time. That's where Leaf is trying to play, of course, and we see significant opportunity there, and I genuinely hope that others will come into the space as well, because it is such a fragmented market and there is so much opportunity. I think one thing that is a potential barrier to interoperability is the word itself, and so I don't know if it's helpful just to start with talking about integrations. Everybody loves integrations, right? They're easy, we know how to + +[50:00] do them, they're very accessible for most companies. And so, instead of going into, you know, a room full of people and saying, hey, let's all become interoperable, it's having those conversations to say: how can you and I integrate today? And over time, that creates a network. Fantastic- it'll help us get there. And so for myself- you know, I look at it and I'm like: well, great, we already have assets for Rwandan francs, Kenyan shillings, Ugandan shillings on the Stellar network. That's very easy for us to push into Chipper's wallet, for example. So, you know, if we want to have a conversation after this and talk about what that looks like, great, because we already have that asset. I think that my fear in this is that we keep abiding by the same playbook over time. And so, you know, Leaf created these assets because we didn't see anybody else out there who had them. But, you know, that could also be justified for other companies coming into the space. You know, if there's a new company in Rwanda that + +[51:00] wanted to have their own Rwandan franc coin on Stellar, they could do that. It's unnecessary- they should just use ours, and we should learn how to play nicely together. But I understand the business reasoning and the fear that could create that landscape where everyone just continues to do their own thing. So I think we are very pro-integration and very pro-interoperability. Stellar is a good tool for that, and the network is still fairly limited in Africa, but it's growing. And we hope that, you know, by sharing our experiences and our lessons learned- there have been a lot of them- potentially we can help other players coming into that space as we're growing ourselves, because, of course, we're still very new in the game as well. Great. I mean, Joseph, do you have something to add to that? Yeah, so I really like Tori's points on integrations. That seems to be, like, a more + +[52:00] kind of, like, friendly approach. One thing- so we talked about Mojaloop, and I've been kind of involved with that movement for a bit.
One of the issues you see is that many, you know, startups that should adopt it- or are looking to adopt it- realize that, for example, you probably need to have, like, a switch, right? In Nigeria, it's going to cost you a lot of money to have, like, a switching license, right? And then you have this thing where the startups say they want to come together and then build, like, one switch that they can use to connect to, you know, Mojaloop in another country. There's all that kind of complexity around it. And so just starting with integration seems to me like a very, you know, fair approach- like, as much as possible, if lots of startups + +[53:00] put out exposed APIs to connect to each other, then maybe that'll be, like, a great start, and then we can start talking about, you know, connecting across different borders, like countries. So that sounds very, you know, plausible to me. Yeah, I wanted to add to that: it will be interesting to see a couple of players in the African market actually try to become such a central switch- for example, MFS Africa is trying to do that- and then on the other side you have, of course, Stellar, which is trying to achieve that in a decentralized way, as a non-profit, with certain open source standards. It will be interesting to see, + +[54:00] yeah, which way it's going to lean: whether it will be more with a central party, or it will go decentralized. And I think it will take a bit of time, but I expect that at some point it will lean more decentralized- on, for example, Stellar, with open source standards- rather than a central party. Or that's at least what I hope, because if it becomes a central party- one or two players- then it's a question whether it really becomes cheaper. Great, thank you. So I personally feel like that's a great place to end: this sort of dream of hope that these standards for integrations, you know, sort of tend towards the open source. Of course, as someone at the SDF who + +[55:00] prizes the open source culture, and believes that this network should be permissionless and owned by everyone and easily accessible, and that we should work together to create shared standards rather than having one single party own them- that sounds great to me, and I would love to see that happen. But, of course, what is interesting is that what happens next is up to the people here and the people listening- people in the world in general, the people who are building businesses, and also their users. Like, what is going to succeed? I think only time will tell. But at this point, I do want to take these last few minutes, because, as I mentioned at the top of the panel, we, the SDF, are working with DFS Lab to sponsor a bootcamp, and I just want to take these last few minutes to give some information, so that anyone listening understands what that means, what it can do for them, and how to apply. And so I want to thank everyone for their participation. Thank you so much. And I'll pass it over to + +[56:00] Joseph so he can tell us a little bit about the bootcamp. Yeah, thanks, Justin. So DFS Lab is working with the Stellar Development Foundation.
It's to kind of open up an opportunity for early- to mid-stage startups who are interested in building on Stellar for Africa. Basically, the bootcamp follows, like, a, you know, design sprint approach, where, over a period of three days, you follow a bunch of steps, right, to go from idea to prototype. And then we have demo day on the fourth day, where startups or + +[57:00] participants can win, you know, anywhere from five thousand to twenty thousand USDC in prizes for the most promising startups, you know, during the event. And there are also opportunities for follow-up funding from the Stellar Development Foundation enterprise fund and other investors who attend the demo day, right. And it's open to fintechs, open to companies that are currently building on the blockchain, and then companies that are just, you know, thinking of it- the way we refer to them is crypto-curious- companies who are building in Africa. It's open to any of those types of people to apply. You can visit the link you see on the screen right now to apply. Applications close + +[58:00] on the 20th of August, and, yeah, if you have any questions about it, feel free to email me- that's Joseph at dfslab dot net- but I'm fairly positive, like, you know, you have all the information you need on the website. So please feel free to apply. We're looking forward to having you during the event. Great, yeah, thanks everyone. So, obviously, as Joseph said, you can visit that link on the screen to find out more. You can email Joseph at dfslab dot net. Applications, as it says, close August 20th, and our goal here is to sort of help support and incubate projects in Africa that are focused on fintech solutions that solve real-world problems. And with that, to end, I really want to say that it was a real privilege to have all four of you here. Thank + +[59:00] you so much. I know that we just scratched the surface, but I feel like we started to get into some interesting areas- to talk about, you know, what's going on in the region, in integrations- and also, I think, there were some really interesting nuggets for people who are interested in building their own companies or scaling their own companies. So I really appreciate it, and I hope that everyone out there listening also came away with something. We do plan to have more of these ecosystem roundtables in the future, so you can always find out more about those by just going to `stellar.org/events`. Thanks everyone for participating and for watching. + +
diff --git a/meetings/2021-07-29.mdx b/meetings/2021-07-29.mdx new file mode 100644 index 0000000000..0910f7ba30 --- /dev/null +++ b/meetings/2021-07-29.mdx @@ -0,0 +1,186 @@ +--- +title: "Payment Channels on Stellar: Generalized Transaction Preconditions and Multisig" +description: "This discussion examined two protocol proposals that enable payment channels on Stellar: generalized transaction preconditions and a new multisig mechanism for safely exchanging bundled transactions. The conversation focused on design tradeoffs, implementation details, and how these changes support scalable off-chain payments." +authors: + - david-mazieres + - eric-saunders + - jed-mccaleb + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [legacy, CAP-21, CAP-40] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session focused on two closely related protocol changes that together lay the foundation for payment channels on Stellar. Participants explored how more expressive transaction preconditions and a new multisignature construct can enable secure off-chain state updates with periodic on-chain settlement. + +The discussion emphasized practical implementation concerns, backward compatibility, and long-term extensibility. Much of the conversation centered on how these CAPs interact with existing ledger structures, SDKs, and signing workflows, with the goal of supporting high-volume use cases without introducing unnecessary complexity. + +### Key Topics + +- CAP-21: generalizing transaction preconditions beyond simple time bounds + - Support for relative time locks, ledger bounds, and additional signer requirements + - Design tradeoffs around account extension nesting vs. flattening + - How preconditions are validated versus applied during transaction processing + - Implications for SDKs, signing safety, and backward compatibility + - Payment channels as a scaling mechanism through off-chain transactions +- CAP-40: enabling safe exchange of signatures for multiple related transactions + - Single-step authorization of up to three linked transactions + - Reducing round trips and coordination complexity in payment channels + - Relationship to existing multisig and hash-based signers + - Considerations around scalability, signature limits, and future extensions + +### Outcomes + +- Agreement to revise CAP-21 to address account extension structure and validation semantics +- Consensus on a clear path toward accepting CAP-21 after targeted updates +- General alignment that CAP-40 is valuable, with a request to add a standalone use case +- Plan to revisit both proposals together for acceptance after revisions + +### Resources + +- [CAP-21 – Generalized Transaction Preconditions (Proposal)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) +- [CAP-21 – Generalized Transaction Preconditions (Discussion Thread)](https://groups.google.com/g/stellar-dev/c/N8vzP2Mi89U) +- [CAP-40 – Payment Channel Multisig (Proposal)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) +- [CAP-40 – Payment Channel Multisig (Discussion Thread)](https://groups.google.com/g/stellar-dev/c/Wp7gNaJvt40) + +
+ Video Transcript + +[00:00] Hello everyone, and welcome to the Stellar Open Protocol Discussion. So in these meetings we discuss Core Advancement Proposals, aka CAPs. These are technical specs that suggest changes to the Stellar protocol, necessary to allow the protocol to continue to evolve to meet the needs of the ecosystem. And we live stream these meetings so that anyone who's interested in watching can follow along, although I do want to note it is a technical discussion. So if you are watching, you may want to take a look at the CAPs themselves- they're linked to in the show description. + +[01:00] Also, we do keep an eye on the discussion box and your comments there. They do help us inform our decisions going forward. Today's discussion will focus on two CAPs, and our goal is really, as it is generally in these meetings, to try to resolve some outstanding questions about them. So we may not address questions that you ask directly, but if some relevant ones come in, I may actually try to incorporate them into the conversation. So, as I said, we are focusing on CAPs, and this meeting is just part of the CAP life cycle. Generally, CAPs are discussed on the stellar-dev mailing list- there's a link to that mailing list in the event description for anyone who's interested in joining the discussion. Then CAPs are drafted and iterated on based on feedback and suggestions, and they end up here in this protocol meeting. Based on what happens in this meeting, they may be put up for a vote by the CAP committee, who decides whether or not to accept or reject a CAP, at which point they move into Final Comment Period. So everyone has a last chance to raise questions on the stellar-dev mailing list, and if it makes it through Final Comment Period, a CAP is implemented in a major Stellar Core release. + +[02:00] Then there's still one final step, which is that every major release- every protocol upgrade- is voted on by the validators and has to be accepted before it's applied to the network. And so all of that is to say: this is a long process, where there's a lot of public participation, and ultimately the network decides whether to accept major protocol changes. So, enough preamble. Today we are discussing [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) and [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md). Both of these lay the groundwork for building payment channels on Stellar. So, payment channels: they allow multiple parties to securely transact off-chain and periodically settle on-chain, and, among other things, that could make it easier to build high-volume use cases on Stellar. So if today's discussion seems dense at any point, keep that in mind: these CAPs make technical changes geared towards payment channels, and payment channels are important for scaling network throughput. So without further ado, we will look at the actual CAPs. We're going to start by talking about [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md). This proposal generalizes the time bounds field in transactions to support other conditions, including conditions that relax sequence number + +[03:00] checking and provide relative time locks. And since we last discussed this, there were some small technical changes. There was also the addition of extra signers, and I think we should start there.
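+
+For readers following along, the generalized preconditions under discussion look roughly like this. The snippet below is an illustrative TypeScript transliteration of CAP-21's XDR- the field names mirror the proposal, but this is not a real SDK type:
+
+```ts
+// Illustrative transliteration of CAP-21's precondition set.
+interface TimeBounds {
+  minTime: bigint; // unix seconds, 0 = unbounded
+  maxTime: bigint;
+}
+
+interface LedgerBounds {
+  minLedger: number; // ledger sequence numbers, 0 = unbounded
+  maxLedger: number;
+}
+
+interface PreconditionsV2 {
+  timeBounds?: TimeBounds; // the one condition transactions have today
+  ledgerBounds?: LedgerBounds; // new: valid only within a ledger range
+  minSeqNum?: bigint; // new: relaxed sequence number checking
+  minSeqAge: bigint; // new: relative time lock, in seconds
+  minSeqLedgerGap: number; // new: relative ledger-based lock
+  extraSigners: string[]; // new: up to two additional required signers
+}
+
+// A transaction is valid only if every populated condition holds.
+```
+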
The new precondition- extra signers- has been added. Leigh, can you just explain why that addition happened, and sort of talk us through any questions that you have about it? Yes, so extra signers was initially added to support stateless HTLCs: the ability to say that this transaction needs to be signed by, or a hash needs to be included with, this transaction to make it valid, without having to store that hash as a signer on the account. So right now you can use the hash-X signer to say that you need a hash included with transactions to be able to transact with an account, and, instead of needing to store that + +[04:00] on the account, it can now just be stored within the transaction. And then, second, it's also being used for [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md), which we'll talk about later. Would it help for me to, like, overview the other changes to [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md)? Just because, you know, I think people were already kind of generally okay with it the last time we discussed it, and a lot of these changes are in response to Leigh's specific implementation experience- like, things that just came up because of implementing it. I think that would be super helpful. Yeah, great. So one of the changes is that the previous version didn't really talk about the results, and there are new failure modes now, because you've got these multiple preconditions. So you might have multiple preconditions failing- in particular, you know, you could have preconditions failing because it's too early or too late, or both. And so what we basically said is: if a + +[05:00] transaction can never be valid again, then even if there are other things that are too early, we'll still call it too late; we'll give the result code txTOO_EARLY only in the case that there is the possibility that the transaction can be valid again if resubmitted at some later time. So basically, if you have a thing that's too early and a thing that's too late, that gets combined into a too-late error. And so that fits it into the existing error codes and, I think, preserves the intuition: if you're too late, you can never resubmit, and if you're too early, there might be a chance of resubmitting at some later point. So, other things that we changed- there are some clarifications. Oh yeah, we kind of rearranged the stuff that's dangling off the account entry in the ledger, because it turns out that + +[06:00] the sponsorship information is only there if you're actually sponsoring things, and so otherwise it's currently null, or it's a v1 extension instead of v2. So we changed that into a pointer, and we just kept the v2 data structure as a thing that dangles off the v3 now, optionally, if you're actually using it. And then the final change is that I hadn't previously mentioned what to do with an account that didn't have an account entry extension v3. And so the answer is: we just say, pretend all the fields are zero, right? So it's as if the ledger and the time at which the sequence number was last modified are zero, and that's okay.
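+
+A minimal sketch of the "pretend the fields are zero" rule just described, with illustrative types rather than core's actual structures:
+
+```ts
+// An absent v3 extension behaves exactly like an all-zero one, which
+// keeps evaluation of relative locks deterministic for old accounts.
+interface AccountEntryExtV3 {
+  seqLedger: number; // ledger in which the sequence number last changed
+  seqTime: bigint; // close time at which it last changed
+}
+
+function effectiveExtV3(ext: AccountEntryExtV3 | null): AccountEntryExtV3 {
+  return ext ?? { seqLedger: 0, seqTime: 0n };
+}
+
+console.log(effectiveExtV3(null)); // { seqLedger: 0, seqTime: 0n }
+```
+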
Even though that means anything that's, like, a relative delay will execute immediately, any time you would use these things it would be on some new account in some new protocol, + +[07:00] and so therefore there would be some other transaction on the account, so that zero wouldn't matter. So again, it's just to be precise. I don't think it really matters, but it's important to say that it's something, just to make sure, you know, everything is deterministic and fully specified. So I think that's pretty much all we needed to change, right? Did I forget anything, Leigh? No, I think that was everything that we listed. And, specifically to do with, you know, the extension, I'm really interested to hear what Nico, Siddharth, John- what you folks think of this, and how this might impact core. Well, the first thing I'll say is: you were the one who kind of just prototyped out this implementation, so what was your experience doing it? So I haven't tried the new change, but the difficulty that I definitely + +[08:00] hit with the initial proposal was that there are assumptions made in core that the v2 extension is only present when the account's involved in sponsorship, and that shows up a lot in the tests- more than in the actual application code. So I guess, if core makes that assumption in tests, are there any other products or anything else that makes that assumption, that might be interpreting the XDR, that comes to mind? I think- please continue. I think the new proposal- which says that, you know, the v2 is still only there when there's sponsorship- I think that works. From my perspective here, like, I don't think it really matters that much. I don't think it's really worth fighting about much, honestly, in the grand scheme of things. Like, I + +[09:00] could see some merits to this: now that we have all these dangling things, maybe it would be nice to, every once in a while, you know, smash them all down into one single dangling thing, then dangle some more stuff off of it for a while, and then smush them all down again at a later date. Again, I can see merit to that. I think, in terms of how core treats these things- basically, the process that we've used for upgrading these extensions- there are, like, two parts to it. One thing is: once it comes into existence, we'll never unwind that. So, like, if you sponsor an account and then remove the sponsorship, you'll still have the v2 extension. So the tests, most of them, have these assumptions about when these things will appear- and that's what you were running into- not about whether they will be there or not, period. The other thing is that we already kind of made this executive decision when we added the sponsorship stuff: + +[10:00] imagine that you didn't have any native liabilities, so you shouldn't have had a v1 extension, but then you participated in sponsorship. Now you have a v2 extension, and we just said, like, okay, well, in that case, we'll just inject the v1 extension and set it to zero- and it's no different from the case where you would add liabilities and then subsequently remove them. So from my perspective, core is designed in such a way that either solution is completely reasonable. I think, in terms of the total net implementation changes, it would probably be less if we dangled it, but the size of the extension would be less if we didn't. So, just to be clear: you're okay with the current state, which is, like, a hybrid, right, where v3 includes v1 but dangles off v2? So that's kind of- there's basically this thing called sponsor info.
That is a pointer to an account extension v2, but it's inside the account entry extension v3. + +[11:00] Yeah, that would be fine. I mean, again, it doesn't really matter, but if you're gonna do that, then the marginal additional cost of dangling the v3 extension off the v2 extension is even smaller, because the four extra bytes are appearing in either case. So you're saying that there's no situation where it's important that the v2 be null? Correct. Okay, so, well, anyway, I guess I don't really care. What I really want is for [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) to make it in. So, like, what's the best structure to make this happen- or is it the current one? I mean, I think- I'm the one that always brings this back up, you know. The current proposal, in a way, is kind of this weird hybrid thing that is neither the nested one nor the, you know, fully flattened one. So I say either we consider one or the other, but + +[12:00] not this kind of half-broken one. Because the reason why you want nesting in the first place- that's kind of the whole rationale that we have in the repo, actually, for why we are doing nesting- is that it makes it easier for people: they don't have to change their code, right? Like, if you depend on, let's say, sponsorship, you keep referring to sponsorship the same way. You don't have to add the, you know, conditions like: oh, if it's a v3, I need to go look for sponsorship over there instead of over here, right? And it's never going to be useful to take something that has sponsorship info and then delete the sponsorship info, or to kind of create new accounts that just don't have any sponsorship info because they've never sponsored anything. That's the question. It sounded like Leigh kind of wanted this in order to make a lot of the tests easier to port. Oh well, it's actually not going to be easier. Hold on, + +[13:00] wait, let me clarify something there. The v3 extension will always exist for a newly created account, right? That's right. But the sponsorship info might not. So it sounded like currently, in a lot of tests, there are accounts that don't have sponsorship info, and they assume they never get upgraded to v2- and I don't know, this is somehow- I don't know. Leigh, do you want to- I did this primarily in response to your feedback, Leigh, so I don't know if you want to jump in. Yeah, so there are two impacts. So I think so far we've only really talked about the impact on core, and it sounded like, from what John was saying, there's not really a big impact on core, and I think the concern that I had around the impact on tests is maybe misplaced. But the second impact is the impact on the ecosystem, which I think is what Nico was highlighting too- and that is, you know, everyone who parses this XDR will need to look for fields in two places: they'll need to look for them either in the v1 and v2, or in v3 if we move them.
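+
+A sketch of the two layouts being debated, with invented field names (the real structures are the ledger-entry XDR in the CAP):
+
+```ts
+// Flattened: v3 re-declares the older fields at its own top level.
+interface ExtV3Flattened {
+  numSponsored: number;
+  numSponsoring: number;
+  seqLedger: number; // the new v3 field
+}
+
+// Nested: the v3 data dangles off the v2 extension, so existing v2
+// accessors keep working unchanged.
+interface ExtV2Nested {
+  numSponsored: number;
+  numSponsoring: number;
+  ext?: { seqLedger: number }; // the dangling v3 part
+}
+
+// Nesting keeps old lookups as they were...
+const sponsoring = (e: ExtV2Nested): number => e.numSponsoring;
+// ...at the price of deeper access paths for anything new.
+const seqLedger = (e: ExtV2Nested): number => e.ext?.seqLedger ?? 0;
+```
+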
Basically, anyone who passes the xtr has to know to look for it in all these different places, which can also be a little bit of a foot gun for people who are upgrading because they may not realize that this field has been duplicated into a new place. Yeah, although we can mechanically fix it with, like any place that gets translated to json, for example, we can just mechanically always move the sponsorship info out. Yeah, I don't think we should be making- any decisions based on the effects of downstream applications consuming ledger entries. Realistically, we're talking about one, maybe two consumers. So you know we can talk to them, okay. So I think that I mean, to my mind, the two things that make the most sense are to either keep the [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) draft exactly as is, or to flatten + +[15:00] Out the sponsorship info the way it was in the previous revision of the draft. So it's just all one big like v3 structure. Wait, well, you're saying that. So I'm looking at the current master. Is that what you call the latest draft? Because this one is the hybrid one that has, like this half flattened version exactly, and so I'm saying either we just keep that or we should fully flatten it, just like, instead of having so no, but that's actually the one that doesn't make sense, the one that's half flattened. Either you're doing a nesting, fully nested right where you extend d3 inside v2- okay, or you're doing the full flattening- okay, and then let's do that. How about the full flattening? How about I just, instead of making that up, instead of making is having sponsor info be a pointer to like an account entry or account extension v2? How about I just take the fields of account entry v2 and make, put them in line inside the county account entry v3 or + +[16:00] Just put the whole v2 structure in there? Yeah, I know I don't really care, honestly, flatten it or fully nest it, but yeah, the hybrid thing, I think is kind of like the worst of all the worlds. I propose that I will. So I propose to just basically revert to what it was before, if lee is okay with that, what was it before? Yesterday? It was nested before. No, it was flattened before. It was just the fields at the fields of v1 and v2 structures were just inside the v3 structure. Yeah, the things that I do think that flatten- every time we flatten structures it involves changes in a bunch of places that we may or may not understand, and I'd rather not do it if we can avoid it. So are you advocating for the nested version, foodie? Nested is kind of the as a default is safe + +[17:00] Because we don't definitely require the fewest changes to core because we already have, well, not just score, but like, yeah, it's not just core, it's also right like the. You know anything out there that is actually using sponsor, like looking up sponsorship, let's say, so what about having them, not as pointers, but just putting the v1 and v2 structures into v3, so then any xtr library can just kind of extract those things by type. No, but it's the same problem. You have to switch, like you have to change your code that still compiles and still looks like it's working, but it will just fail miserably when you actually run into that like existing code. It's always the same problem. Right, kind of like the problem. We, you know, we created with the. What was it? The next accounts, right, like, but I would argue that like mux accounts is precisely because it's way worse. 
But I would argue that muxed accounts was way worse, because + +[18:00] of course people use accounts a lot more, right? But here we make it even more fringe. So that's kind of why- you know, if you're looking at the edge case of the edge case, and you hope that people are going to do the right thing, I think it's actually less likely. Because, I mean, the problem is that the nested version is just, like, much harder to program to, right? It's like you're literally fighting with the right-hand margin of your text editor all the time, where you have to put line breaks in the middle of, like, you know, expressions that contain dots and arrows to access fields. We're getting to the point that now we're gonna access fields that are, like, you know, four or five or six deep in structures. It's just kind of painful for the programmers. So, like, yes, there might be a little bit more stuff to do in transition, though I would argue that actually, if we do our XDR stuff right, it's actually not + +[19:00] that hard, because you can automate a lot of this stuff. And so I'd rather just have a little bit of pain up front and have something that's, like, much nicer to program to in the long run. So when I wrote the prototype, I actually used the nested version- I didn't use the flattened version- because it was easier to do, in essence. You mean you put the account entry v3 inside the account entry v2? Yeah, I'm pretty sure that's what I did, and it wasn't that bad. How wide- what's the width of your text editor? I mean, I use soft wrap. The thing is, David, if you want to have a helper function to avoid typing all this stuff, you can, right? That helper function is optional in the nested world; it's mandatory in the non-nested world. And we already have- all right, okay, whatever, I'm overruled. No, but + +[20:00] we're not gonna vote. You guys are all wrong, but I'll do it this way. I'll rewrite the CAP to do this. You know, generations of, like, poor Stellar programmers are gonna have to be typing structures six levels deep, because- whatever, you guys are worried about a non-existent problem. But fine, it's more important to get [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) accepted. So fine- it sounds like a reasonable concession: we are going with the nested version in order to get [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) done. I like it. I think this raises interesting points, though, for the preconditions type that we're introducing, because the preconditions type is setting itself up to be, like, a flattened version- or at least that was my understanding of the proposal. It would definitely work better. I mean, like, in the long run-
five years from now, there will be no benefit- you know, there will be only disadvantages to doing what we're doing now- but, like, yes, in the short run, it will be slightly smoother + +[21:00] to do it with, like, a fully nested thing. It's just like how, when they were choosing, you know, not to fix the precedence of the bitwise and/or operators when they went from BCPL to C and introduced the logical double-and and double-or, they were like: well, you know, yeah, this is broken, but there are, like, hundreds of C programmers in the world- we don't want to, you know, break backwards compatibility. And then we got to the point that even Java adopted those broken precedences. Let's get back- I don't know, I understand what you're saying, but at the same time, there is this need to sort of get something out there that people can adopt and start using. Well, I think there are real use cases that [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) allows. There are two problems here. I think one is: I think people are, like, massively mistaken about, you know, how hard it is to adapt to something where you have multiple versions of the structure, right? And we already have an existence proof that we did it with something much harder, which was the muxed accounts, + +[22:00] right? So if we survived that, we can definitely survive this. The second thing is: I think people don't appreciate how much should be automated, because currently in our SDKs we're doing too much sort of manual stuff, right? And if you use more metaprogramming-friendly XDR libraries- like, say, you know, goxdr, the one that I wrote, or the xdrpp that we're using for Stellar- you can actually automate a lot of this stuff. And so what would be great is- since the places where people actually care about this, probably, if they do at all, is when they're manipulating the JSON- it would be easy to write just, like, a few simple rules to make the JSON look the same, or to understand the JSON both ways. But you know what, I'm just beating a dead horse. Maybe a thing to do is introduce a CAP that's, like, a meta CAP- like, this is how we should do upgrades- and show some evidence for why, in the future, + +[23:00] it's better to actually version structures and put the versions at the top instead of putting these ext things at the bottom. But whatever. So, general preconditions- I think that's what, was it Leigh, yeah, asking about that. Yeah, sorry, I think, in general, it's a misleading name. Yeah, I'm not really too fussed on the name, but- No, it's not the name, but, you know, right now it's hardcoded, not general. That's what I mean by: it's a lie. So I thought it was- do you want it to become, like, preconditions v1 or something? Like, what do you want? No, I would, like, make it an array of, you know- a static array or something. But we've already- let's not relitigate that, I know we discussed this already. So actually I have more of a question related to the CAP, + +[24:00] because what I spent time on, actually, in the past few days, was looking at the SEP that is the payment channel protocol, trying to see: okay, how is that SEP really related to the CAP? And it doesn't look like the SEP is using all of the functionality that is in the CAP.
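+
+A sketch of the alternative Nico floats just above- preconditions as an open-ended list of tagged conditions rather than one hardcoded struct. The encoding below is invented for illustration and is not what CAP-21 actually specifies:
+
+```ts
+// Invented tagged-union encoding for an array-of-conditions design.
+type Condition =
+  | { kind: "timeBounds"; minTime: bigint; maxTime: bigint }
+  | { kind: "ledgerBounds"; minLedger: number; maxLedger: number }
+  | { kind: "minSeqAge"; seconds: bigint };
+
+// A transaction would carry an array of conditions, all of which must
+// hold; new condition kinds could be added without reshaping a struct.
+function allSatisfied(
+  conditions: Condition[],
+  check: (c: Condition) => boolean,
+): boolean {
+  return conditions.every(check);
+}
+```
+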
So that's kind of, you know, where I'm coming from. It's like, I feel like it's trying to guess at use cases that maybe don't exist, and at the same time, I would prefer to grab something that can be extended easily, as opposed to right now- because if you version it right now, you break everybody. Even if we're doing the fully nested version, there's no reason why we can't version the v3 thing that's dangling off the v2, right? Like, if you want to add a new condition, you would break all the consumers. No, because you would just + +[25:00] have a data structure that includes- yeah, but now you also need to add accessors for every single condition. No, you just need a function that always converts to the latest version, and that can be done in an automated way. Could we just add a v2? Can we just add an extension on the end of the general preconditions, so that we can v2 it, dangling? I mean, we're going to have to- we're totally bikeshedding this thing. Did someone else wanna make a pass at this, and then, like, I'll clean up the mess? Because otherwise I'm gonna do something that you guys aren't going to like. I think it's worth just responding to Nico, what you said about, you know, the prototype channel protocol not using all of the fields: the only field it doesn't use right now is ledger bounds. It uses everything else, + +[26:00] and I think that was a comment- something that David and I talked about very briefly- was: does it make sense to keep ledger bounds, because of that? Why- what is the argument for keeping ledger bounds? The argument for keeping the ledger bounds is that the current payment channels are probably safe enough, but they're not safe if, like, you know, something happens and there's, like, an hour of downtime on the network or something- which, you know, does actually happen periodically. So somebody who's paranoid and wants to create a payment channel that is actually robust to downtime of the network would also need the ledger bounds to be safe. Right? Because if the network goes down, your wall-clock timers all still expire; but if you have this ledger bounds thing, then, since the network wasn't creating ledgers while it was down, + +[27:00] you still have some opportunity to, you know, get in there and prevent someone from incorrectly closing the channel. Personally, I'm, like, not really that concerned about changing the- however we structure the preconditions- like, we could make it an array of, you know, whatever, of unions, and that would be fine, or we could make it a- it doesn't really matter, honestly, to me. I think, at the end of the day, people care much more about writing these than reading them. Like, the only thing that really should be reading these, at the end of the day, is, you know, if you for some reason want to inspect something to make sure that you're signing the thing that you mean to be signing- which I think is, like, a very small subset of the code, because a lot of times you're signing transactions that you're creating yourself, in which case you kind of know what you're doing.
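+
+A minimal sketch of the downtime argument above: give the channel-close transaction both a wall-clock window and a ledger-sequence window. The values and shapes are illustrative, not a real SDK call:
+
+```ts
+// If the network halts, wall-clock time keeps running but the ledger
+// sequence does not advance, so a maxLedger bound preserves a window
+// to dispute an incorrect close once the network resumes.
+const closePreconditions = {
+  timeBounds: { minTime: 1_700_000_000n, maxTime: 1_700_086_400n },
+  ledgerBounds: { minLedger: 42_000_000, maxLedger: 42_017_280 },
+};
+
+function withinBounds(nowSecs: bigint, currentLedger: number): boolean {
+  const { timeBounds: t, ledgerBounds: l } = closePreconditions;
+  return (
+    nowSecs >= t.minTime &&
+    nowSecs <= t.maxTime &&
+    currentLedger >= l.minLedger &&
+    currentLedger <= l.maxLedger
+  );
+}
+```
+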
So I don't feel like the risk of blowing yourself up with edge cases here is particularly high. So let's not fight about it that much. We have unions on all sorts of stuff everywhere- transactions, for example: I could send you a v0 transaction, and if your code only knows how to handle v1 transactions, you're going to blow up. So we already have this. Is- John, is your comment about whether or not we need ledger bounds? No, it's about the whole- Nico saying these aren't really general. It's about that part. I personally- I think my point is kind of: the ledger bounds seem reasonable to me, the structure is reasonable to me; we could do a different structure and it would also be reasonable to me. I don't see a lot of reason to hash it + +[29:00] Out for 20 minutes. So- could we just change the name from general preconditions to something else? Yeah, I mean, if you named it, like, preconditions v1 or something, that would match up with the names that we normally use. That sounds good- doesn't really matter to me, because if you didn't change it, if you called it general preconditions and I needed to change it later, I would just call it general preconditions v2 and call it a day. So, you know, can we add an extension as well, so that we just follow the same dangling pattern? I just don't see a lot of reason to, is kind of my point here. If you can't handle the types of transactions you're trying to sign, you won't sign them, and great- if you didn't sign something, then you didn't agree to something you didn't understand. The point is, it's already a union: we've already got, like, no preconditions and time bounds, right. So + +[30:00] If it turns out that we need a different kind of preconditions, we can just add a preconditions v2. Nobody can handle the- what is it, the extension, like, case number two- right now anyway, without modifying their code. So it's more about thinking about what happens when you make a v2 of that thing. Right, yeah, that's what you're saying, though. Let's say I add a new feature which is, I don't know, time times ledger bounds, some super crazy stupid condition that nobody- no, how about number of ledgers that aren't completely full- that one's actually plausible- number of ledgers that aren't completely full. But if you want to sign stuff and you want to understand what you're signing, you're checking for some condition; you're not going to sign stuff that you don't understand. So your code might barf, but you won't have signed something that you didn't understand, so it's fine. + +[31:00] I don't understand what the bad case is if you don't understand something: you're just not going to agree to something that- I don't think there's a bad case there. I think the case that I'm concerned with is the same case we were talking about with the account extensions, and that is that every SDK, every application that parses the XDR, now needs to look for time bounds-
Or, now let's take ledger bounds: say ledger bounds stays in the v1, then we add a v2- ledger bounds is now going to be in two places, so it needs to look for it in both places. So there's the parsing case, and then there's also the creation case for the transaction. SDKs need to make a decision about, you know, what's the general best practice for creating a transaction that has ledger bounds. Do they use the new v2 that has ledger bounds, or do they use the v1, or do they only use the v2 when they're using one of the new fields? And for the most part, in a lot of cases, that probably won't matter. But in the cases where somebody builds a transaction and then they want to build that exact + +[32:00] Same transaction, and they expect the SDK to build it the same way, that does matter- because if an SDK developer decides they're just always going to use the latest version of the structure, then the transaction is going to change every time they build that same transaction. So in some ways we're actually introducing a backwards incompatibility, because you conceptually build the same transaction, but SDKs may make it so that you're actually building a different transaction. I see what you're getting at here, and that's a concern. I don't totally think that concern is necessarily exactly relevant here, in the sense that I don't know what promises SDKs are making- but if an SDK is promising to have that property, that we're not going to retroactively change your stuff, that's really a property of the SDK. Because if you're creating, like, a semantic transaction, I feel like the SDK just has a promise to do + +[33:00] What you asked it to do. If you want to have a stronger constraint, like an exact transaction, you probably need a different vocabulary in the SDK for that. But putting that all aside: the thing that makes me scared about doing the nesting in this context is that you really don't want to sign something that you don't understand. Imagine that you're looking for this ledger bounds structure, right, which you know to be in the case 2 extension, also known as general preconditions- but imagine we're dangling a v3 off of it. You can't just call a function to look up the v2 one and pull out the ledger numbers, because you actually have to check to make sure that the v3 stuff isn't there- because if it is there, nested, you're going to sign something that you didn't mean to sign. This is what I wrote in the code example in the agenda; this is why it kind of explodes and becomes pretty awful. So as versions increment, you're going to get more and more of these to go + +[34:00] Down the rabbit hole of checking. So I really think the much safer thing is to just fail- fail to sign things you don't understand, and then update your code to understand them- instead of having to handle all these cases of, oh, what if I need something that's in the v4, but the v3 exists and I don't care about anything in there, and maybe there's a v5? That gets really confusing, and your code can change out from under you just as the extensions grow.
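The "rabbit hole" being described looks roughly like this in code- a sketch with made-up union shapes, not the CAP's XDR. The unsafe pattern is a getter that finds its field and ignores everything newer; the safe one refuses outright.

```typescript
// Hypothetical shapes. With nested extensions, a "get ledger bounds" helper
// is not enough: safe signing code must also prove no newer arm is present,
// because a newer arm could carry conditions this code cannot interpret.
type PrecondExt =
  | { v: 0 }
  | { v: 3; v3: { newCondition: unknown; ext: { v: 0 } | { v: 4; v4: unknown } } };

interface PreconditionsV2 {
  ledgerBounds?: { minLedger: number; maxLedger: number };
  ext: PrecondExt;
}

function safeLedgerBounds(p: PreconditionsV2) {
  // Refuse to interpret anything carrying extensions this code predates:
  // signing such a transaction would mean agreeing to semantics we can't see.
  if (p.ext.v !== 0) {
    throw new Error("unknown precondition extension - refusing to sign");
  }
  return p.ledgerBounds;
}
```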
Basically. And I think there's another thing, too, which is that if we were really trying to build this little language of, you know, an array of little precondition operations that can be arbitrary, that can be constantly extended- honestly, then we should try to unify that with the claimable balances, right? Because otherwise we have two different little languages for preconditions, and if we do have two different ones, it just seems really needlessly complex for people to learn how to use this. So what we have is + +[35:00] Simple. It doesn't have to be the last word, because we can change it- again, using these unions- and it covers all the things that we need to do payment channels, plus the thing that we need if we want to do super safe payment channels. Which- you know, people could potentially object that, well, on Ethereum I can make it safer by counting ledgers or something- this is a good set, and we shouldn't let the perfect be the enemy of the good. We just do this, and if it turns out to be so successful that we want twelve different other slight variations on the preconditions, then we can do an array of a programmable thing, or maybe we can update the claimable balances and have a general language for describing preconditions. I'm pretty in sync with David on this one, personally. I think the argument is basically just a lot different than the + +[36:00] Ledger entry stuff, where there's something established that we're already doing. So what would that mean the next step is for [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md)- what is the revision, and who's taking it on? I think the only thing we said we might change is to rename general preconditions to preconditions v1, and that was it. That's it. And we would keep ledger bounds. Wait, I thought we were also moving to the extensions, these multiple extensions. I thought what John and you were advocating for was no extensions. I thought we were just talking about the preconditions, not the ledger entry extension. There are two questions, right. One is: how should we do future extensibility for the preconditions that are embedded in the transaction? And the other is + +[37:00] What to do about the ledger entry extension part, or the account entry extension part, right? So, John, I believe you and I were talking about just what goes in an actual transaction- the preconditions, right? Yeah, that's the thing where I think it's really dangerous to have these accessor functions that don't necessarily look all the way down to the bottom. And so, while I'm happy to leave [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) as is there- it sounded like everybody else wanted me to dangle account entry extension v3 off of account entry extension v2. Yes- yeah, sorry, I left that out, at least. So I guess I can just do that. Fine- it turns my stomach, but I'll do it- and after that change we'll sort of reevaluate [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md).
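For reference, the precondition union being renamed here has roughly the shape below, modeled loosely in TypeScript from the CAP-21 draft (optional fields mirror XDR pointer types; details were still in flux at the time of this discussion).

```typescript
interface TimeBounds { minTime: bigint; maxTime: bigint }

// The union: legacy arms stay as-is, and new kinds of preconditions can be
// added later as new arms (a preconditions v3, and so on).
type Preconditions =
  | { type: "PRECOND_NONE" }                          // no preconditions at all
  | { type: "PRECOND_TIME"; timeBounds: TimeBounds }  // legacy time bounds only
  | { type: "PRECOND_V2"; v2: PreconditionsV2 };      // the new CAP-21 arm

interface PreconditionsV2 {
  timeBounds?: TimeBounds;
  ledgerBounds?: { minLedger: number; maxLedger: number };
  minSeqNum?: bigint;      // relaxes the seq check: source seq may lie anywhere
                           // in [minSeqNum, tx.seqNum) instead of exactly seqNum-1
  minSeqAge: bigint;       // seconds the current source seq num must have existed
  minSeqLedgerGap: number; // ledgers that must have closed since it last changed
  extraSigners: SignerKey[]; // small list of additional required signers
}
type SignerKey = unknown;  // elided here; see the CAP-40 discussion below
```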
Are there any other questions that we want to talk about for [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) right now, or should we move on to [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md)? + +[38:00] I did have one other question about this. There was something that struck me as kind of strange. Actually, I had three lines I pulled out, and all of them were basically about the exact same thing, and I kept thinking this was really weird. There's a sentence in the proposal that reads: all transactions are validated, sequence numbers increased and fees deducted before any operations are executed. One consequence is that bump sequence operations, though they update an account's seqTime and seqLedger, do not affect the validity of other transactions in the same block. But that's actually not how things work at all right now. So this is either a very profound change to the way things work- which we'd have to recognize as a really big change, even though I don't think people are using bump seq a lot- or it's a misunderstanding of the current situation. Sorry, the audio is breaking up. + +[39:00] Sorry, what's the line number, or what can I search for? You broke up when you were saying which- yeah, let me just tell you what to search for: if you search for 'one consequence is', that'll probably work. Let me see if that works. Okay, yeah- 'one consequence is', right. But that actually is what happens currently, right? Bump sequence happens when you're executing operations; it doesn't happen when you're validating transactions. Correct, but let's imagine that you have one source account. You submit transactions t1 and t2 at sequence numbers n plus one and n plus two- you get the point- and the behavior- both of them are no-ops, they contain, like, zero- sorry, I don't know if David can hear me at all right now. No, I can't. But + +[40:00] Oh crap. Hopefully- I knew this was a week when I couldn't do this meeting properly, but I want to move forward. So you have- okay, where's the chat? Let's take this one to the mailing list, I think. Yeah, and I want to point something out- this doesn't actually impact the behavior of the bump sequence; the payment channels are not actually + +[41:00] Dependent on that. So I think what that breaks is that right now you can have transactions that fail later because of that sequence number- exactly. As you can imagine, you'll have to go on the mailing list; at this point, you're banned from this call. There's no chat, David- not a new chat that I see, at least. So, yeah, there's not a chat in this interface. One does not simply chat during the protocol committee meeting. Yeah, it sounds like he can hear us. So, yeah, the concern is that you have two transactions- the first transaction at sequence number n plus one- and the first one does a bump seq to n plus ten; this invalidates the second one, which will fail with a bad sequence number. But that will only happen at apply- you know, when you actually + +[42:00] Apply transactions. And the proposed change would actually break that, because it would allow both transactions to execute- exactly- if the second one has some funny, you know, side effects in the proposal.
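A toy model of the ordering question in this exchange- this is not Core's actual code, just the two phases and where a BumpSequence lands between them.

```typescript
// Two phases per block: a validation pass (seq checks, fees), then
// sequential application, which is where BumpSequence takes effect today.
interface Tx { seqNum: number; bumpTo?: number } // bumpTo models BumpSequence

function applyBlock(accountSeq: number, txs: Tx[]): string[] {
  const results: string[] = [];

  // Phase 1 - validation: no operation has executed yet, so a BumpSequence
  // inside t1 cannot be seen here; both t1 and t2 look fine.
  let speculativeSeq = accountSeq;
  const valid = txs.filter((tx) => {
    const ok = tx.seqNum === speculativeSeq + 1;
    if (ok) speculativeSeq = tx.seqNum;
    return ok;
  });

  // Phase 2 - application: the bump lands here, so a later transaction that
  // passed validation can still fail with a bad sequence number.
  let seq = accountSeq;
  for (const tx of valid) {
    if (tx.seqNum !== seq + 1) {
      results.push(`tx ${tx.seqNum}: failed at apply (bad seq) - fee still charged`);
      continue;
    }
    seq = tx.bumpTo !== undefined ? Math.max(tx.seqNum, tx.bumpTo) : tx.seqNum;
    results.push(`tx ${tx.seqNum}: applied, account seq now ${seq}`);
  }
  return results;
}

// t1 at seq n+1 bumps to n+10; t2 at n+2 validates fine, then fails at apply.
console.log(applyBlock(0, [{ seqNum: 1, bumpTo: 10 }, { seqNum: 2 }]));
```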
Now those side effects can actually happen, and there are some pretty important consequences for another sentence in the proposal. Earlier in the proposal there are two sentences that read: a transaction whose preconditions are not satisfied is invalid and must not execute, even to fail- meaning it cannot change the source account's sequence number or charge a fee. But basically, in the example we just gave, you could imagine, like, a seq gap thing- minSeqNum, minSeqLedgerGap, minSeqAge, whatever, you guys know what I'm talking about- + +[43:00] And that would be broken by the current bump seq semantics, because it would actually write it at apply time, and then the later one would be invalid at apply time and therefore fail. So yeah, there are probably some edge cases here that we have to iron out: either we have to say that these work in a different way from other stuff, or- yeah, I'm not super sure. You could not allow- I think what we talked about before was maybe not allowing more than one, so that a source account can only be in one transaction per ledger. That's not necessarily easy. Well, I know it's not, but that's the only way you can make this work, I think. But maybe there's a better one that people can figure out. It sounds like this is something that will bear further thought, + +[44:00] That will require further thought to figure out what's the best solution here, and so I think this is something that someone- maybe John- should raise on the mailing list. Yeah, I'll- send me an email about it or something. David, do you have solar flares or something? But this is, like, super important- or can we have this meeting next week or something? I knew this was a bad week for this; this is the week that I wasn't gonna have wired internet. David did predict that, it's true. Yes, I apologize for trying to force it forward, but- certainly this is super important stuff that we're talking about now. So- Zoom usually allows you to call in and get in by phone, but when I click the thing, it doesn't give me a + +[45:00] Phone number. The crazy thing is, I can hear everything you're saying now, so maybe just tell us what you want to say. Okay, what I was gonna say is: if you have transaction n plus one, and it does a bump sequence to, say, n plus two, both can currently execute. Yes, they will both execute- but no, currently the second one is guaranteed to fail, exactly. How does it fail? So you've actually executed the bump sequence when you're validating transaction sequence numbers? Suppose transaction one changes the signer or something, right- you're still charging a fee for transaction two, right, but it fails. Exactly- yeah, it fails. So what kind of ends up happening is that any check we run during validity checking also gets run during + +[46:00] Transaction application. So basically, all your validity checks get done twice- once kind of speculatively and once for real- and if it fails the validity check that it does again during application, it fails at application: it's on the ledger, it's charged a fee, and the sequence number is consumed. Yeah, okay. So what about this- what about saying these preconditions always get checked twice?
If it passes the first time, you get charged the fee; if it passes the second time, you then can execute. That's fine- that would basically match up with what we're saying. But the question is: does that break your stuff? No, I mean, it's a little more expensive, but it's- well, the age thing. That's what John was talking about, right? Yeah, I don't know if you were able to hear me at that point, but would this potentially mess up the seq age or seq gap? I can't remember. So maybe we check the age things only + +[47:00] Once and the other fields twice, and we rearrange the data structure, if need be, such that it's very obvious which section is the stuff that gets checked once and which section gets checked twice. In all honesty, there could be some sanity to basically saying, hey, these precondition things, we only need to check them once. But you're saying that this would allow things to succeed where currently they fail. Well, right now the preconditions aren't anything that can change state- time is time, it's all checked against the close time, so it shouldn't matter. But yeah. The thing is that right now we don't have a concept of checks that are done at apply time, before- while we do, like, the fee processing, for example- we don't + +[48:00] Have that kind of concept, which is, I think, what we're talking about: a phase that does all those checks that are only done once per block, before anything else happens. That would actually be really easy to implement- that would be easy- but it doesn't exist today. Right, agreed. Yeah, and I wonder if it would do something funny also in terms of results. Oh yeah, there'd probably be some annoying cases to handle there, because we don't have a place right now to express that there would be a failure in that phase- for Horizon, I'm thinking; the meta doesn't let you fail early like that. Hold on, wait- do we really not- it + +[49:00] Applies at apply time, you get the results, but you wouldn't know that it's a failure that happened because of an early check. There's something kind of interesting going on here, though. I guess the real point would be: these precondition checks are something that should be done at validation time but not at apply time, because they already succeeded at validation time. I guess you're right- yeah, that could also be the change, right? The updated thing is that we have certain checks that are only done at validation time. Yeah, and I bet that could be useful for other reasons. So- the annoying thing is that- okay, well, one thing- I mean, arguably, the behavior that I'm describing here is not, like, terrible; it's just, unfortunately, slightly more permissive than what we currently do. It might not + +[50:00] Be inherently fatal. So, David, one of the things that's actually annoying about doing those checks at validation time only is that we're supposed to have, more or less as an invariant, that those checks don't depend on ledger state- I mean, they only depend on the sequence number, I guess, right? But you're still, literally, loading the account entry, right. Yeah, that's the only thing.
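A sketch of the "check some things once, others twice" split being proposed here- a toy model under assumptions, not Core's implementation.

```typescript
interface LedgerState { closeTime: bigint; ledgerSeq: number }

interface TxChecks {
  // Conditions safe to re-check at apply (e.g. time/ledger bounds): close
  // time and ledger number never move backwards within a block.
  checkedTwice: (l: LedgerState) => boolean;
  // Age/gap-style conditions: a preceding transaction in the same block can
  // bump the sequence number and reset the "age", so re-checking these at
  // apply is exactly what would break.
  checkedOnce: (l: LedgerState) => boolean;
}

function process(tx: TxChecks, atValidation: LedgerState, atApply: LedgerState) {
  if (!tx.checkedTwice(atValidation) || !tx.checkedOnce(atValidation)) {
    return "invalid: never executes, no fee, no seq consumed";
  }
  if (!tx.checkedTwice(atApply)) {
    return "failed at apply: fee charged, seq consumed";
  }
  return "executed";
}
```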
But if we wanted to have maybe other dependencies there, in terms of checks- I guess you could. Everything depends on either the ledger header or the account entry; you don't need to fetch any trust lines or any other kind of state. So I think we should still be okay in terms of no extra database accesses. + +[51:00] I don't think that's a big issue. I think this is something we can handle in a sane way. I think I just need to think about it- let me just address it. I can make a pass at this, and I can propose something that is both compatible with what we're doing today and has the least overhead- avoids double checking, if it's sane to do so. I have to think it through a little bit more, but I could have something by next week. That feels like the logical next step for [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) to me. Yeah, okay. So I have to address this, and I have to address the nested ext structures. And I think, with this new condition, I guess that paragraph, or whatever section in the CAP, will be where you can talk about + +[52:00] Whether we have specific rules around multiple transactions, where you want to allow certain combinations. What I'm thinking here is the case where you have two transactions with this min age requirement- because otherwise you're going to break the payment channel, you know, the assumption that you have a grace period every time you process a transaction for the payment channel protocol. That's a good point- okay, I will; I think that's a good change. I think we should highlight this specific invariant that we're going after here. That's the same invariant that we need for the transaction queue policy, exactly. Okay, so I think I need- is there anything + +[53:00] Else? Because I want to make sure we discuss [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md). No, I think we should move on to CAP-40- I mean, there's only four minutes left. So let's jump to [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md), because I feel like there's a clear path with [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md). [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md)- what is the- let me see- so, [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md), just trying to find it myself. So, for anyone who's watching: this proposal allows participants in a payment channel to safely exchange signatures for a set of up to three transactions in a single step. It looks like there are some open questions- several- but we don't have that much time. So what is the most important question that you need answered here, Lee? So I guess the question I've been most interested in is the one Nico raised: you know, is this the right structure? Are there other things, like Merkle trees, that we could be using? And then maybe I + +[54:00] Can add a little more detail. I mean, I have that on the mailing list, right- the question there is really- so I looked at it, and the proposal,
I think, makes sense with the current payment channel proposal, because you only have a very small set of participants. If you are looking at what you would need if you had more participants- or maybe those multi-hop things or whatever- would that make this insufficient? And would you then need, at that point, some way of expressing a bigger set- a Merkle tree, maybe, I don't know, something else. I mean, I think that what we have now is so general- yes, you can't do Merkle trees, but you can interoperate with Ethereum or other blockchains. So not only does it save entire round trips and eliminate a whole bunch of foot + +[55:00] Guns in these payment channel protocols, it also, in such a straightforward way, adds support for- essentially, we can basically do HTLCs with other blockchains, without even the hash part, right; you can literally just kind of tie transactions together across blockchains. So from my point of view, this is just completely obvious in retrospect- except, you know, obviously it's brilliant, because nobody thought of it until now. It's just so straightforwardly exactly what you want in so many situations where it's: I want this transaction only if this executes- fine, if you execute this, then sign the other thing, right, and we're done. So it's so simple and so useful. I also had a very similar reaction to this when it was suggested- I was like, oh, that's very clever. No- I mean, I agree. I think it makes total sense for what we're trying to do right now. It's more the question, for me: is that going to be durable, + +[56:00] In terms of design, for the payment channel? Because I know, for example, in Lightning- oh, I think it didn't go that far, because- no, but in the context of payment channels, they didn't do that with Lightning. In Lightning, for example, they need to unlock a lot more transactions with one transaction. No, I think, honestly, they just didn't think of it, right- because you could just say, if you have a three-hop Lightning payment, you would just have to sign all three hops in order to cash it in. Yeah, but so you need three- yeah, this transaction needs to have three additional payloads, right. Sure, so that's why this doesn't scale- that's the part where it doesn't scale. So that's my question. Let me ask you a question, Nico: sometimes there exists a world where it makes sense to have something that's simple and easy to use but not necessarily scalable, and something that's + +[57:00] Harder to use but highly scalable. I feel like the Merkle tree falls into that category: it's obviously much more scalable, but it's also not as user friendly and not as easy to fit into the current Stellar model. So- well, maybe a Merkle tree of one element is the same thing, right? That's kind of- maybe, I don't know. It's more that, if the design relies heavily on such a construct, I'm more questioning the design of the payment channel. But honestly, you could do the Merkle tree- it's just that the Merkle tree would have to be on the Ethereum side, right. But, you know, obviously we don't have logic to interpret Merkle trees.
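The construct being praised here is CAP-40's signed-payload signer. Modeled loosely in TypeScript (the real definition is XDR; the verification call uses an assumed ed25519 library):

```typescript
import { verify } from "@noble/ed25519"; // assumed ed25519 library

// A signer that names a key AND a payload, e.g. the hash of another tx.
interface Ed25519SignedPayloadSigner {
  ed25519: Uint8Array; // public key that must produce the signature
  payload: Uint8Array; // arbitrary bytes, e.g. sha256(other transaction)
}

// The rule: a signature satisfies this signer only if it is a valid ed25519
// signature of `payload` by `ed25519` - not of the transaction carrying the
// signer. That indirection is what ties two transactions together.
async function satisfies(s: Ed25519SignedPayloadSigner, sig: Uint8Array) {
  return verify(sig, s.payload, s.ed25519);
}
```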
But the point is, because you're signing arbitrary data, you could do something such that a single payment on Stellar could unlock, like, 20 USDC payments on Ethereum, and do some kind of complicated multi-way exchange that way. + +[58:00] David- so your point is well taken that if the Merkle tree part were on the Ethereum side, perhaps this would work there. So is the argument you're making that the construct Lee proposes is sufficient for the Merkle tree case, assuming that Stellar could also handle the Merkle trees- or is the payload not big enough, or, you know, whatever? I mean, to be honest, the cases that come to mind involve multiple signers rather than signatures on multiple transactions- because on Stellar we have a different way of unlocking transactions, which is to twiddle sequence numbers and use preconditions. So from my point of view, the real bottleneck here is just that I don't want to add + +[59:00] A gazillion signatures, because verification is expensive, right. So if we somehow changed the transaction fees to also account for the number of signature verifications or something, I could see a very obvious way to extend this to have more than one or two signatures. Well, actually- so that's related to that question on the number of signature verifications. Sorry, I missed that as part of the [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) conversation, because you guys added the extra signers at the same time, and I was not sure which CAP was tracking it. I know- one thing we could do- because in the context of [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md), it's just another signer. I mean, the extra signature- it doesn't matter, actually, because it's being processed like any other signature, right. But we don't charge for signatures, do we? I thought we just charged per operation. So, yeah. So now, going back + +[01:00:00] To [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md), and how it's being used in a new way- that is, you can have, is it two or three, additional signature checks. I would argue that those should be counted towards the 20-signature limit that we have on a transaction. Okay, that's fine, right- because that way we don't have to change the model in terms of fees or anything. Do you see what I mean? I think it gets too complicated otherwise. Done- and that's a quick [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) change, because [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) just introduces the concept of this new type of- yeah, of signer. I also want to say we're over time, so I think we're going to have to cut this. But I feel like with [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) there's a clear path forward, and it's that David is going to make those two revisions. With [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md), is there an easy next step that we can take here + +[01:01:00] Just to keep this moving, keep pushing?
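To see why one signature "ties transactions together", consider this hypothetical flow (illustrative variable names; the signing call is from an assumed ed25519 library):

```typescript
import { sign } from "@noble/ed25519"; // assumed ed25519 library

declare const t2Hash: Uint8Array;       // hash of transaction T2 (hypothetical)
declare const bobPublicKey: Uint8Array; // Bob's key pair (hypothetical)
declare const bobSecretKey: Uint8Array;

async function releaseBoth() {
  // T1 carries one signed-payload signer: "Bob's key over T2's hash".
  const tieSigner = { ed25519: bobPublicKey, payload: t2Hash };

  // Bob signs T2's hash exactly once...
  const sig = await sign(t2Hash, bobSecretKey);

  // ...and that single signature does double duty:
  //  1. it satisfies `tieSigner`, so T1 can execute, and
  //  2. since Stellar transactions are signed over their hash, it is also
  //     the very signature needed to submit T2. Anyone who saw T1 execute
  //     can therefore submit T2: "if you execute this, you've signed that."
  return { tieSigner, sig };
}
```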
Is there a discussion that we need to have, either synchronously later or async on the Stellar dev list, about this question? I think we should move to accept [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) today. Yeah, I feel like the argument about, you know, would it be better as a Merkle tree- it's a little bit of both. You know, this is very simple, and it's only really useful in specific constrained cases- the payload has to be below a certain size; there are a lot of constraints. And so if the constraints are a match, this is a great use. And if they aren't, and we have use cases that fall outside those constraints, then maybe we need some other type of signer, like a Merkle tree signer- and with that signer comes a whole lot of costs and things we'd have to figure out, bigger problems to solve. + +[01:02:00] I mean, the only reason I would say it should not be accepted yet is because it doesn't make sense to have a CAP fully in the abstract; it's actually in the context of [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md). Actually, it does make sense in the abstract, the same way we have hash(x) signers. It's just- like, who is going to use that? Anybody who wants to tie multiple- but who is this anybody? My point is that CAPs- what we have in the description of a CAP- is that you have to have the use case before we actually go and spend the work on implementing those things. So you're advocating for holding off on accepting CAP-40 until- yeah. I mean, it looks fine, right- from what we're saying, it's fine. It's more of a: we're not going to spend time implementing it if we don't have the full story. So we could just make a soft sort of promise: okay, we're planning to accept this one right after we + +[01:03:00] Accept [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md). We're looking good, yeah. So, ideally, at our next protocol meeting, whatever that is, we can accept both [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) and [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md)- exactly. Yeah, I'd rather do that than try to do this piecemeal thing, because the point of all this is to get something that makes sense for payment channels. Yeah, sure- but I do think [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) is just- it's so simple, and it has so many additional uses; it's just a fantastic thing in and of itself. But actually, one thing I would love to see in the [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) proposal would be a use case for [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) that doesn't depend on [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md). That would be cool- I would like that. Lee, can you add, like, an Ethereum swap? Sure. Okay, so that's basically the requirement for [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md): Lee's going to add a use case that doesn't require- rely on- [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md).
The goal for next time is to review the changes that David makes to + +[01:04:00] [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md), and ideally to get [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md), shortly followed by [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md), accepted. Cool. Anything else, we'll just take to the Stellar dev mailing list. And everyone here: thank you so much for joining, thanks for your time, thanks for thinking this through. Anyone out there who's watching: thank you so much- it's always great to have you here, and feel free to join that dev mailing list if you want to see these discussions and participate in them yourselves. There's a link to it in the show description. All right, sorry we ran over, everybody, but thanks again for your time. + +
diff --git a/meetings/2021-09-09.mdx b/meetings/2021-09-09.mdx new file mode 100644 index 0000000000..9093126735 --- /dev/null +++ b/meetings/2021-09-09.mdx @@ -0,0 +1,160 @@ +--- +title: "CAP-21: Generalized Transaction Preconditions for Payment Channels" +description: "This discussion explores CAP-21, a core protocol proposal that introduces generalized transaction preconditions to enable secure, high-throughput payment channels on Stellar. The session focuses on design tradeoffs, backward compatibility, and transaction ordering behavior required to safely support off-chain payment workflows." +authors: + - david-mazieres + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [legacy, CAP-21, CAP-40] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This protocol discussion centers on CAP-21 and how generalized transaction preconditions make payment channels practical on Stellar. Participants examine how features like relative sequence constraints and time-based conditions allow parties to transact off-chain while preserving on-chain safety and eventual settlement guarantees. + +Much of the conversation digs into implementation realities: how new account-entry extensions interact with historical ledger states, how validators should forward and prioritize conditional transactions, and how to avoid subtle “foot guns” for developers building advanced transaction flows. The group also reviews how CAP-21 underpins related work and what changes are still needed before final acceptance. + +### Key Topics + +- How generalized transaction preconditions (such as `minSeqAge` and `minSeqLedgerGap`) enable scalable payment channels. +- Tradeoffs between protocol-version changes versus opt-in account flags, and why flags were rejected. +- Implications of new account-entry extensions on legacy tests and ledger invariants. +- Keeping time bounds unsigned for consistency with existing transactions and SDK behavior. +- Validator behavior for forwarding, queueing, and prioritizing transactions with sequence constraints. +- Interactions between fee bidding, transaction ordering, and denial-of-service considerations. +- Design questions around combining delay-based and sequence-jump preconditions in a single transaction. + +### Outcomes + +- Agreement to keep new account-entry behavior protocol-wide rather than opt-in, despite test refactoring costs. +- Decision to retain unsigned timepoints for transaction preconditions. +- Consensus to adjust transaction-forwarding rules toward simpler, first-seen handling to reduce abuse. +- Action items identified to revise CAP-21 text and continue async review before acceptance. + +### Resources + +- [CAP-21 – Generalized Transaction Preconditions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) +- [CAP-40 – Protocol-level support built on CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) + +
+ Video Transcript + +[00:00] Hello everyone, and welcome to another Stellar Open Protocol Discussion. As per usual in these meetings, we discuss potential changes to the Stellar protocol that take the form of Core Advancement Proposals, or CAPs. These are technical specs that suggest changes to the Stellar protocol, allowing it to add new features and evolve to meet the needs of the ecosystem. We're live streaming them so that anyone who's out there can follow along. But again, I do want to point out: it's technical. So if you are watching this, you should probably look at [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) in order to understand what we're talking about- that is what we're going to be talking about today, [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md). And you should also join the Stellar dev mailing list, where offline discussions about these changes take place. + +[01:00] Also, we do keep an eye on the discussion box, so if you put comments or questions in there, they do help inform our decisions going forward. We may not actually address them in this meeting, although if they're super germane, I may bring them up. Today we are focusing specifically, as I said, on [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md). [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md), basically, is a CAP that lays the groundwork for building payment channels on Stellar- so if what we're talking about today sounds a little obscure, keep that in mind. Payment channels are things that allow multiple parties to securely transact off-chain and periodically settle on-chain, and, among other things, they make it easier to build high-volume use cases on Stellar. So the changes proposed by [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) allow for payment channels. Last time we also discussed [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) and basically got it to a point where it was near ready to be accepted- but [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) is contingent on [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md). And so here's where we are with [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md): there are a few outstanding issues and questions that we're going to try to walk through today. I think that we are actually quite close to getting this accepted, although + +[02:00] We'll see what the outcome of today's conversation is. So, with that in mind, let's just kick it off. [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) is what we are talking about today, and I believe there are a couple of questions that came up on the mailing list, plus a question about transactions failing during execution that came up during the last meeting. But to start, I think we should try to deal with the outstanding questions that are on the mailing list. Nico, if you can just share the first of those questions with us so that we can start to discuss it. I was looking- yeah, I didn't have those questions open, hold on, sorry.
So, yeah, I guess the first question was more of a minor + +[03:00] Thing that we didn't discuss, but that maybe is something we should at least raise as a potential thing. That is: should the behavior that we have for tracking the modified time for the sequence number be an opt-in? Right now it's kind of a blanket change, right- as soon as the protocol, as soon as the CAP, basically becomes active, we are going to track this new last-modified, basically, in each account. I mean, the reason I mentioned making it a flag, instead of being + +[04:00] Protocol-based, right- in terms of the code, it's actually the same thing. In the code you have to deal with old and new protocol versions either way; so instead of checking against the protocol version, we could check against the flag. The difference is that it would make the tests much simpler, because then we can actually just test that part in isolation fairly easily- whereas I know that Lee raised the issue in the past, and I think it's true, that we would have to rework a bunch of tests, given the way they are written today, in Core at least; I don't know about outside. I mean, it seems like a pretty big foot gun to have this flag, right? Because now this is something that's going to catastrophically break all kinds of, you know, smart contracts and stuff if this flag ever gets cleared, or if people don't realize that it's not set. So there would have to be a huge advantage + +[05:00] To having these two kinds of accounts: ones that just blanket-fail preconditions, and ones that actually implement the preconditions. No, it would not- the preconditions would not fail; it's the last-modified that would not be enabled. Yeah. So I don't know- I think the CAP says that if it's not there, it's treated as zero. Yeah, exactly, it would be zero. So I don't know- I think if we added the flag, we actually don't need to have a default at all. We just say that a transaction is invalid if it tries to use that feature and the account doesn't have it enabled, and for any account to enable the feature- but then you'd have to constantly check, when you're engaging in a protocol, that the account actually has this flag set. + +[06:00] So I really don't understand what benefit could possibly outweigh that. Like I was saying, it's a faster implementation. No, I just wanted to mention it because I know that was raised before, and looking at the first round of questions that Lee had on the prototype, it looks like it's a legitimate concern. So maybe you can explain a little better what the complication is. It's around the setup, I think, of test code. A lot of times we're creating accounts, we're doing all those things, and those accounts are created without account entry extensions- well, with the default change, they would be created with this extension- and then, when we do the- so, + +[07:00] Basically, we create a snapshot of the ledger, right, and then we run a bunch of tests for different protocol versions, and here the protocol version is obviously baked in.
For older protocol versions, this would be an invalid ledger state. So I think there's a bunch of refactoring that needs to be done. I mean, it would be an invalid ledger state- well, an account with such an extension is not valid as of today, right? So what happened when you upgraded to account entry extension v2? Why is this any different from the v1-to-v2 change? Because v2 was an opt-in, right- it was only when you were using- I mean, for v1 it was liabilities, right? And for v2 it's numSponsored and numSponsoring, sponsors. Yeah- otherwise we didn't change it, we would not touch it. Let me clarify something here: the + +[08:00] Way that the proposal is currently written, I think what I'm understanding is that when an account is created, it'll automatically have an account entry extension v3- at that point there's no way otherwise, right? And yeah. So, basically, the reason this will be confusing, David, is that there are old historical tests- which is a lot of the tests, many thousands, possibly tens of thousands of tests- written in such a way that the setup was done in the current protocol version, and then you time-travel backwards and do the tests in the appropriate protocol version. So we'd have to go and fix that. Yeah. So you're saying that right now, if I create a new account, I don't get an account entry extension v2? Correct, you just have a null extension. Now, maybe there's an argument that we should go and fix our tests anyway. Yeah- I mean, that's why I was saying + +[09:00] It's a trade-off, right. Like, can I write you an XDR converter that will automatically- I mean, it seems like we could. I mean, do the tests have specific SHA-256 hash values of ledger state- is that the problem? Well, they can't, because there are other values, like the ledger number and stuff, that aren't there. So what specifically is it that fails in the tests? Like, maybe there's just some XDR template magic- it's invalid XDR as of the current protocol. So, if I just write an unmarshaling function that will unmarshal a new account entry to an old one by stripping off the entry extensions v2 and v3, then we're good, right- all we need is that one function? No, not really. I mean, yes, you could + +[10:00] In principle do that, but in practice what's happening is we have these invariants that are written, right- they check that nothing is broken, that things only exist if they should exist- in effect, like, no negative balances, blah blah. And in practice, you could imagine the sequence of operations is: you're in protocol version 19, let's say, or 20 or whatever, whenever we get this done. You create an account; it has account entry extension v3. You travel back in time, you go and do some other operation- you know, you're not doing raw XDR operations, it's literally a Stellar operation, so let's call it a payment. It goes and does this payment; it loads up this thing; it loads it happily, because it knows how to load it- it's valid XDR in terms of what the XDR is- sorry, it is marshalled, or it's unmarshalled-
You're loading up the literal bytes, but this is happening deep inside of Stellar Core- not in the test, in the actual production code. + +[11:00] Then you load this thing up; it's happy, because it's good. You go through, you do the operation, everything is fine. And then you hit the invariant, and the invariant's like: oh no, you're dead, because you have this thing that shouldn't have been there. The right solution is just to fix the tests so that they do the setup in the right version. Do you realize you guys are massively making the case that I was making several weeks ago, that we shouldn't be doing our extensions this way? Because if there's any benefit to doing this horrible cascading nested thing- which is wasteful of bytes, wasteful of programmer keystrokes, and wasteful of your right-hand margin- it should be that this backwards-compatibility stuff isn't an issue. But no- but David, this doesn't matter. This is invalid- this is an invalid byte configuration in the existing protocol version. That's what John just said. We have invariants that check that you don't have garbage in the ledger, right, and that's garbage- from today's point of view, it's garbage. Right, but I'm saying it has + +[12:00] Nothing to do with how extensions are set up. Right, but- shouldn't that be one of the benefits of doing these cascaded XDRs, this backwards compatibility? It's not backwards compatible- that's the point. This is garbage from today's protocol. I'm just saying we have to rewrite the tests for this- but we would also have had to rewrite the tests if we'd done it the original way that I did the extension. So I don't see what we've won by this latest change that I've made, given that a lot of things were written wrong in the first place. But the production code doesn't have to change at all- the production code all just works exactly the same, that's the thing. Well, whatever. Okay, so we do have to rewrite the tests ourselves. The issue here is just tests, right? In the production code, the way that it works as proposed is good- it prevents there from being a foot gun- but it requires us to rewrite a bunch of tests. Is that correct? + +[13:00] Yeah. I mean, going back to the question I was asking, right- the reason I was asking was to see how bad it would be if we made this a flag. That's the question. Yeah- and David, it seems like it's: yeah, it would be really bad. So that means we need to go fix the tests. Okay, cool- fixing tests is easy, it just takes time. Exactly- it's going to be a bit of a time sink, but that's okay. I mean, if we're saying it's basically a non-starter to have a flag, it takes either time or XDR trickery, right. Oh no, the XDR trickery is going to end up violating the protocol- that's why we can't do that. Yeah, that's fine- let's take it offline regardless. It's test hackery, right? Yeah.
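For readers keeping score, the nested chain under discussion looks roughly like this, modeled loosely in TypeScript from the draft (extension points on the last arm are elided):

```typescript
interface AccountEntry {
  // ...balance, signers, etc. elided...
  ext: { v: 0 } | { v: 1; v1: AccountEntryExtensionV1 };
}
interface AccountEntryExtensionV1 {
  liabilities: { buying: bigint; selling: bigint };
  ext: { v: 0 } | { v: 2; v2: AccountEntryExtensionV2 };
}
interface AccountEntryExtensionV2 {
  numSponsored: number;
  numSponsoring: number;
  ext: { v: 0 } | { v: 3; v3: AccountEntryExtensionV3 };
}
interface AccountEntryExtensionV3 {
  seqLedger: number; // ledger in which the seq number was last modified
  seqTime: bigint;   // close time at which it was last modified
}
// The test problem above: an account created under the new protocol always
// carries the v1 -> v2 -> v3 chain, which an invariant running under an
// older protocol version rightly rejects as garbage.
```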
So, yeah- exactly, one thing: if we were to introduce a flag, I think it would have an impact on the usability + +[14:00] Of this stuff as well, because, like you're saying, we would never be able to just create a transaction for an existing account that hadn't enabled the flag. Which means that if you have an account that's just sitting there on the network, and you want to interact with it in this way, with some new contract that maybe doesn't require interactive setup initially, you can't just go and use these features of the network- which is sort of inconvenient. That won't affect the payment channel protocol that we're hoping to use this for right now, but it might affect some other future thing. Yeah, and it's worse than that, because we don't have SPV, so there's no concise proof that an account actually has this flag. So it means that the receiver side would synchronously need to query some trusted Horizon instance in order to participate in what should be an offline protocol. So, okay: no flag, we fix the tests. Yeah, it's a bummer for the tests, but okay. + +[15:00] What was the second question? Time points? This one should be a lot easier. Yeah, the unsigned thing. So my question was- I don't remember what happened there, but the mention in the backwards-compat section is actually a bit misleading. It says that, you know, nobody cares- that's actually not true, I'm not so sure. I'm pretty sure we do have, in historical data, transactions with large max-time values. And the other thing, too, is that all this code- Core, Horizon, SDKs- has to handle those large numbers regardless. So in a way, making that change just makes everything more complicated, and I'm not sure it's actually worth it. I'd be delighted to leave it unsigned- I originally had it unsigned, and then I feel like people objected to that. I mean, all I + +[16:00] Want is- I think all our time points should be, like, a typedef, right. I think either signed or unsigned is fine; it's just bad to have a mix of the two. We should just be consistent about signed and unsigned, because when you start comparing signed and unsigned numbers, as we all know, bad things start happening. Well, we already have a mix: claimable balances use signed, time bounds use unsigned- I mean, we're there. So- okay, so I guess I'm out of luck on this one. I really don't care, I just want it to be the same as other things, so I guess I'll keep it unsigned. And, you know- regular transactions- that's kind of the concern I have here, that it changes regular transactions. + +[17:00] So I'll make it unsigned again- make TimePoint unsigned- and we'll deprecate signed time points in future operations. Another option would be to make the time point inside the new preconditions v2 signed, since that's a new field and nobody's going to be using it yet, and keep the old time point as unsigned for backwards compatibility. I know- it seems not worth it to me. Yeah, let's keep it unsigned. Okay, sounds good. All right, so time points remain unsigned. The next one is around the section on how transactions get + +[18:00] Forwarded.
So it basically says there are those two criteria, (a) and (b), that allow a node to decide if it's going to accumulate this transaction in its queues. So the first question is- I think criterion (b) talks about lower sequence numbers, and it's actually not clear. It seems to imply that you are actually allowing transactions to be accumulated if we receive them out of order, and I think this is probably not the intent. That's the first question. + +[19:00] Oh- so, in other words, I say don't forward it, but you think it should be stronger than that: it should be, just throw it away. Yeah- that's a good point, right; it shouldn't have this lower-sequence-number case at all. Does that make sense? Wait, what do you mean, there shouldn't be? Right now you're saying it's invalid if, you know, the condition is not met- those are easy, right? The (b) is what I'm interested in. Right now it says it's invalid if there is already a transaction with a lower sequence number. Sorry- it's invalid- no, it is valid; that's what your text says, that's the polarity: a transaction with a non-zero minSeqAge or minSeqLedgerGap is invalid and not forwarded if either (a) the appropriate condition doesn't hold, + +[20:00] Or (b) there are pending valid transactions with lower sequence numbers on the same source account. Okay, so wait- and you're saying in case (b) it should also be thrown away, instead of stored in memory? Well, it's invalid, right, so we are throwing it away. The question- no, it's not invalid in- it is valid, but there are things with a lower sequence number. So let me give you an example, just to make sure we're on the same page. You have a transaction with sequence number two- but that could execute immediately, because it has a minSeqNum that's less than the default- and you have a transaction with sequence number one. So you could just execute two in isolation, or you could execute one, but you can't execute them both in + +[21:00] The same ledger. Yes, understood. But here- so, in your example, I have two, and then I receive one. What happens then? One takes priority. Okay, so then this is kind of broken- it's actually underspecified, first of all. Why is it underspecified? Well, you're actually not saying- I guess- so, yeah, you're saying you're kicking out transactions with higher sequence numbers. That's right- well, you'll still vote for a block, you know, you'll still nominate a block that has it. But is that a good + +[22:00] Property? I mean, it sounds kind of weird. We already have this- we already have this exact situation. No, we don't kick out transactions like this. Okay, let me tell you a situation I believe is exactly analogous, which is that you receive a transaction with, you know, a fee of 100, and then you receive another transaction, the same sequence number, that has a fee of 200. And so now, if you've already forwarded the 100- well, you forwarded it, fine- but now you'll forward the 200, and if you see both, you'll only forward the one with the higher fee. And so that's the same way here.
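A toy sketch of the queue rule being hashed out (not Core's actual transaction queue): among mutually exclusive pending transactions from one source account, fee decides at equal sequence numbers, and the draft's rule (b) prefers the lower sequence number otherwise.

```typescript
interface PendingTx { seqNum: number; fee: number; minSeqNum?: number }

// Pick which of two mutually exclusive pending transactions to keep/forward.
function pickPreferred(a: PendingTx, b: PendingTx): PendingTx {
  if (a.seqNum === b.seqNum) {
    return a.fee >= b.fee ? a : b; // same seq: the higher fee bid wins
  }
  // Different seq nums can both be pending when minSeqNum relaxes ordering;
  // per the draft, the lower sequence number takes priority.
  return a.seqNum < b.seqNum ? a : b;
}
```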
We're basically saying: among multiple mutually exclusive but valid transactions, you need a way to prioritize them, and I'm saying here you prioritize the one with the lower sequence number. Now, why do you need this, you ask? + +[23:00] Oh, because the second one could later be valid, right? Yeah, unlike the sequence number case, you're not necessarily completely invalidating the second transaction. It will just have to execute, you know, an hour later, or whatever the min seq age is. So you might as well keep the maximum number of transactions valid. I mean, if you are strongly opposed to this, we could, I guess, favor the other one; it doesn't really matter. But it sounds kind of arbitrary, so that's why I'm asking why the first one shouldn't win in this case. In some ways it might be more useful if the second one wins, because we often say that sequence numbers are how you invalidate prior transactions. + +[24:00] So it sounds like, if we say that the first one wins, then you could never use a higher-sequence-number transaction, with a min sequence number that was lower, to invalidate a prior transaction. But someone could always... well, they might not be able to submit it, because it might have a timeout, it might have a min time, right, which is generally how this works. So, you know, the reason I did this is because it seems more useful: if you have two transactions, it seems helpful to have it be such that both can execute. And if you accidentally send out the second transaction too soon, well, I don't strongly care; if you want it the other way around, I can turn it around. It has some implications, right, from a DoS standpoint, because, + +[25:00] if you have this rule, right, where you don't say the first one wins... So, with the example, this will collect more fees, right? The current draft will collect fees on both transactions. No, you're dropping... let me finish. So, with the example you gave, you said it's the same thing as with the fee. With the fee, you have to actually outbid, right? If you want to cancel a pending transaction, you have to constantly update it. With this one, you don't have to update it; it can be the same fee. No. You said the transaction that was in the transaction queue gets discarded because it has a lower sequence number. I'm not currently discarding it. I thought that's what you were suggesting. Currently you're just not forwarding it. So, just to be clear, David, what you're suggesting is: imagine that I have + +[26:00] seen, say, sequence number 10. I submit a transaction with min seq 10, actual seq 12, and then I later receive seq 11. Your approach, which we think is what's in the CAP, and I've been rereading it like five times here while everybody's been talking to make sure that's what it actually says, is that you would actually keep both in that case. Yes, because you don't know: you could see a block that has either one.
It's the same way... I mean, I really think it's very similar to the multiple fees case, right? You saw the fee of 100, then you see this fee of 200, so you forward that as well, and you still need to keep the fee of 100 around, because it could be that the block that gets nominated has that one. You don't want a situation where, if you see the hash of that transaction, you still need to go get the preimage of it, right, because we don't actually have that transaction, + +[27:00] so we might actually have to go fetch it from another Stellar Core, potentially. The other thing, though, is that the fees back off exponentially; that's a big difference against this, and I think that's what Nico was saying. I forget what multiplier we use, I forget if we ended up using two or ten, we debated about it for a long time, but either way, you can only do this 64 times with fees, or maybe it's 10 or 20 times. The scenario that I've been trying to think about, while I was rereading your thing repeatedly, is: what if I have two transactions and they can't both execute because of fees? I don't have enough balance to pay both fees. So, going back to my scenario: you're at 10, you have min seq 10, seq 12, then you receive 11, and the fees make the two transactions mutually exclusive. Which one do I take? Because then I do end up throwing out + +[28:00] the future one, if I take the other one. And how do you implement this, and what's the right thing to do? It's not obvious in that case, if the fees make them mutually exclusive, so the implementation is actually harder there. Well, so hold on. This seems no different from today. Forget the min seq num, right? Suppose that I put out transactions with sequence numbers 11 and 12. Right now they'll both be forwarded, but what if there's not enough fee for 12? Well, then you would throw that out, I guess, right? But I wouldn't have accepted 12, because, for pending transactions, you're sort of keeping track of the cumulative fee that's been charged. I'd have to go and check how we do this for fee bumps. I don't remember if we reject the fee bump if you wouldn't have the + +[29:00] fee to pay for all the subsequent transactions. Yeah, it's the same thing: basically, whatever is in the queue, you have to be able to pay for the fees. Are you sure that's what we implemented? I'm not positive. Oh no, yeah, I'm sure. Yeah, so then that would make this a little bit trickier, David, where you have to handle the case: oh, I have this earlier transaction, but I don't have the fee for it. We should make it symmetric with the fee bump case, I guess. So wait, you're saying that the outer transaction doesn't have enough to pay, or, sorry, what's your... yeah, the outer one. Oh, it doesn't really matter: I have both a normal transaction and a fee bump transaction that I'm paying for + +[30:00] on a different account, and you're saying now you have to prioritize one or the other. Does it even have to be on a different account? I think it could even work on the same account or on different accounts.
It definitely works like this: if you don't have the balance on the other account, then we'll just reject it, you know. But there are different sequence numbers, so I can issue fee bumps on, say, three different transactions on three different source accounts, right? And if I can only pay for two out of those three, then different validators are going to forward potentially different subsets of these three transactions, right? So this seems like a very similar situation. No, it is similar; that's what I was teasing out. Right, like what you described. So imagine your min is, say, one; you know, it doesn't matter, basically it's set, but it's one. And now you get the first transaction, sequence number ten, + +[31:00] then you get the second transaction, sequence number eleven. That's what you actually described here. In that case, you will consider 11 invalid, right, because you have 10 and you can't process both in the same block. So in this case you just discard 11, everything is good; 11 can basically be resubmitted later. All right, now consider this in the other direction: I'm receiving 11, and right now you're saying that 10 is valid. So if 10 is valid, I need to discard 11. The difference here is that you're discarding things from the queue. So you can basically have a situation where, now, take this to, you know, a thousand: I'm going to flood transaction 1000, and then I'm going to submit to the network, basically, one less every time, and those are all going to get forwarded, they're all getting flooded, and at the end only one actually gets in the queue, which is the smallest one. + +[32:00] So I use all this capacity on the overlay, right, with no repercussions. Okay, so. And this would be fixed if we picked the highest sequence number? No, highest has the same problem, right? What you want to do is: the first one wins, I think, independent of the sequence number. You basically don't care: if there is a first one that's in the queue, then, if the second one arrives, you say, oh, there is already something with a condition like that. Yeah, so we can totally do that. The downside now is that you're more likely to get conflicting nominations if there's more than one leader. Yeah, but that's fine. It's + +[33:00] actually better from a flooding point of view; the network is going to be much more resilient at this point. And, sorry, is this what happens with fee bump transactions, like in my example? Because we should just be consistent. So, in my example where I create three bumps, any two of the three are okay, and I basically flood different pairs of them to different validators: they'll each forward, like, the first two that they got. I mean, it's the same with fee bumps. If I submit two transactions with the same fee and the same seq num, right, they're basically competing on the overlay, and the second one just loses, right, and then you have to fetch it from the nominating node. Well, it's not that you fetch it: whoever submitted that thing will have to resubmit it to the network.
But what's going to happen is someone's going to nominate a block, and a bunch of validators are going to be missing one of the transactions in that block, and then they'll all + +[34:00] have to... well, the validators that had this other transaction in their queue, right, when the block is going to close, they are going to discard the transaction, because at this point in time it's likely invalid, depending on the condition. If it says, for example, I need the transaction to be submitted, you know, five ledgers from now, well, obviously it's invalid right now. Right. And, you know, I guess my question is kind of independent of this particular example: we have this ability to create fee bump transactions and kind of spray out a bunch of mutually conflicting fee bump transactions. The result is that whatever block gets nominated is likely going to have transactions that most of the validators don't have, + +[35:00] and so of course they'll just fetch the preimage of that transaction hash, but that has overhead of its own. But you're saying that's not how it works today? Oh yeah, transaction sets are flooded independently of transactions today, right. So, basically, you end up nominating a transaction set, I see the hash of that transaction set in the nomination message, and I turn around and I'm like, I don't know what that transaction set is, and then I have to download all the transactions anyway. Sorry, so you download all the transactions anyway, even if you already have most of them? That's right. Nico, am I right? Yeah, I'm pretty sure that's right. Okay, yeah, that's the way it works right now. Okay, well, then that's easy. So I think the simple change is: you don't prioritize one way or the other, it's just first come, first served for forwarding? Yeah, right, I think that's good. Cool.
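+
+To make the rule the group settles on here concrete, an illustrative C++ sketch of a first-come, first-served flood queue; all names are hypothetical and the conflict test is deliberately oversimplified, so this is a reading aid, not Stellar Core's actual transaction queue:
+
+```cpp
+#include <cstdint>
+#include <map>
+#include <vector>
+
+// Hypothetical shape of a transaction for this sketch.
+struct Tx
+{
+    uint64_t sourceAccount;
+    int64_t seqNum;
+    int64_t minSeqNum; // 0 stands in for "no min seq num precondition"
+};
+
+// Oversimplified: treat any pair involving a min-seq-num precondition on
+// the same account as mutually exclusive within one ledger.
+bool conflicts(Tx const& a, Tx const& b)
+{
+    return a.minSeqNum != 0 || b.minSeqNum != 0;
+}
+
+struct FloodQueue
+{
+    std::map<uint64_t, std::vector<Tx>> pending; // arrival order per account
+
+    // First come, first served: whatever is already queued wins,
+    // regardless of which transaction has the lower sequence number.
+    bool tryAdd(Tx const& tx)
+    {
+        auto& q = pending[tx.sourceAccount];
+        for (auto const& other : q)
+            if (conflicts(other, tx))
+                return false; // drop the newcomer; do not forward it
+        q.push_back(tx);
+        return true; // accumulate and forward
+    }
+};
+```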
Yeah, actually, that was what I was asking with my question number three in that section. + +[36:00] In the second question inside this section on transaction forwarding and ordering, I was asking basically whether we should do something about bad properties. So, the current proposal that we have for the payment channel is actually using separate transactions for D_i and C_i: one is the one that allows you to move the + +[37:00] sequence number, and the other one uses the delay, right? If you were going to create a smart contract where you're putting the condition and the jump in one transaction, you're going to have those kinds of delay attacks, right; you can delay those contracts pretty badly. So the question I had was: should we just not allow people to do that? We can always, by the way, relax that constraint later, but for now, make it so that you can't have a min seq age condition combined with a min seq num. Basically, you can't combine the jump and the age restriction in the same transaction. + +[38:00] What would that break? I think I covered this. So, in the two-way payment channel: right now the protocol for the payment channel doesn't have this problem, because the two transactions, C_i and D_i, actually have very different purposes. One allows you to jump in the sequence numbers, and the other allows you to delay, you know, to grab those grace periods. Yeah, but imagine if you try to do, and I know we were actually looking at this at some point, something where the same transaction allows you to jump and has the condition, the delay, right. I'm not sure I understand. So you're saying that you would make it so that people couldn't use min seq num and min seq age in the same transaction. Because imagine that C_i and D_i somehow could be + +[39:00] combined, yeah, which is what this is talking about. At that point, I don't know what that means, because D_i declares that you're going to do something, and then C_i actually does it. Yeah, but you could imagine having a C_i and D_i combined on the same sequence number. Oh, I don't know; it's a different protocol, right? I'm not talking about the existing payment channel. It's one where you still have D, so you declare something, and then you have this thing that could happen in the future, but then, in the middle, maybe you have optional transactions that can be submitted. Maybe + +[40:00] that would be a use case for this. That last transaction: you want it to be submittable without a specific sequence number on the account, so maybe you want, say, 10 sequence numbers in between, and there are some other transactions that can be submitted before it. I don't think that's what I'm describing: a transaction where you have both the open range on the sequence number and the delay. Yeah, that's what I'm describing. You have that open range so you can submit optional transactions between D and C. The C still has both: it needs the min seq num so that it can be submitted whether or not those optional transactions were submitted, but it also has the delay. Yeah, I see; you could do that. Basically, you jump, and then you + +[41:00] have to wait for a little bit to do those other transactions, but they are optional, and if they don't happen, your final closing transaction can still be submitted after the delay. Yeah. So my point here was that if you actually allow people to craft such transactions, those transactions can be attacked. The assumption with those things is that, because I have the more recent version of that transaction, I can always submit it. But here that's not the case anymore, because I can submit those stale ones in the contract, right? And now, if the grace period or whatever is fairly big, as in, you know, + +[42:00] more than a few minutes, and you have a lot of transactions that you've been exchanging offline, you can basically submit all of them, all the ones that came before, until you actually run out the window. Sorry, I'm confused.
Can I clarify something? Is your objection that this is too restrictive, that there are transactions that you want that would be illegal, or is it the other way around, that there are transactions... okay, all right. So what, specifically, is the transaction that is bad? So, it's not that you have bad transactions. It's that, in a payment channel kind of situation, right, you exchange a lot of those things offline. That's why you need + +[43:00] to be able to jump forward, right, so that you have the latest transaction being processed. And what I'm saying here is that if you were designing a smart contract that was combining the two constructs, it's actually a foot gun; it's subtly wrong. Which two constructs are we talking about? The delay and the jump is the reason why you're saying this is a foot gun? I'm trying to understand, just like David is. So imagine the gap is from sequence number 10 to 20, just to be super concrete. So you've got 11 slots; let's make it 10 to 19, so you've got 10 slots. And then the delay is one day: the min seq age is one day. I think what Nico's saying, but I could be wrong, is that you actually might incur a + +[44:00] 10-day delay, because somebody could play in each of the slots before you get to the last one. Is that what you're saying, Nico, or is that not it? I mean, yeah, for the delay ones, that's exactly what will happen. And also the other way around, where you say, oh, this is only valid for the next five minutes, right, and now you can basically cause the thing to expire by replaying older ones. Actually, sorry, those would be the ones with conditions that are absolute. I mean, yes, of course: I can give you a protocol where, when I throw in time bounds or ledger bounds, it doesn't work, because those bounds aren't there. But to say that's a foot gun... it's like, well, just don't use that feature if you don't need it, and it won't get in your way, right? I just don't see it. What I'm saying is: + +[45:00] I just don't see it. What I'm saying is that I don't see, right now, a use case where you would put both. I can only think of bad things when you put both. So the argument is understandable. You're saying, should we have a tx conditions v2 and a tx conditions v3 or something? No, it doesn't need to be v1, v2; it's more like a tx conditions with the relative delay, and a tx conditions with the jump, the sequence gap, or something. Should there be two flavors of precondition that are mutually exclusive and that have subsets of these things, rather than throwing them all into one giant tx conditions v2? I mean, yeah, maybe it's annoying, but I don't hate that, if that's necessary to get this in. I guess I'm maybe not explaining it very well, but + +[46:00] I know that, when we were looking earlier at this protocol, some transactions actually had those two things set, and if you're not careful, you end up with a bad, basically a broken, contract. And, you know, it's part of kind of giving the ecosystem the least number of things that they can trip on with these preconditions. I don't object to that.
If we want two flavors of tx conditions, I can do that. I can see there's some benefit in terms of transactions taking fewer bytes when serialized. There's some additional complexity, since now you have to check the union. Can we keep it as one structure, and then just have Core not allow those two fields to be set at the same time? I was going to ask the same question. + +[47:00] I mean, that seems like the worst of all worlds, because now we're adding additional constraints that aren't expressed in the XDR. So reading the XDR is not enough: you can make transactions that look valid, because the XDR is valid, but they violate some rules. I don't like that; plus, you don't get the benefit of the more compact transactions. So I would suggest either keeping it this way or having two different kinds of tx conditions. Can we maybe take this offline? I feel like we're down to syntax at this point. Right, but it's clear there are two choices, and there is a design decision involved: one is, split it in two in order to protect users more, and two is, keep it simpler, even though we are potentially leaving in something that could be misused. Actually, I do have a potential use for this. + +[48:00] Well, maybe this is not a very good one, but you could worry about someone submitting a bunch of these transactions and draining the fees from the account or something, and so maybe you want to make sure that the other person is only doing so many. But that's kind of bogus, also, because you could always just use a fee bump transaction to get the one that you want to execute through. So, all right, well, again, I'm pretty agnostic on this. It's a binary choice, and it's pretty clear what it is, so I think moving it offline and just trying to find a way to ultimately choose one or the other makes sense. Okay. The only other thing I'd add to this is that we should probably convince ourselves that it actually is always a bad idea to do this. It probably is, Nico; I think Nico's point sounds right, but I don't think any of the rest of us had actually considered that question, so we should probably take at least a few minutes offline to think about that. Yeah, okay.
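+
+One hypothetical way to picture the "two mutually exclusive flavors" option being weighed here, with C++ standing in for the XDR union; none of these type or field names come from the CAP, and this is only a sketch of the design choice:
+
+```cpp
+#include <cstdint>
+#include <optional>
+#include <variant>
+
+struct TimeBounds
+{
+    uint64_t minTime;
+    uint64_t maxTime;
+};
+
+// Flavor 1: the relative-delay restrictions (the "age").
+struct DelayPreconditions
+{
+    std::optional<TimeBounds> timeBounds;
+    uint64_t minSeqAge;       // seconds the account's seq num must have aged
+    uint32_t minSeqLedgerGap; // ledgers since the seq num last changed
+};
+
+// Flavor 2: the sequence-gap restriction (the "jump").
+struct JumpPreconditions
+{
+    std::optional<TimeBounds> timeBounds;
+    int64_t minSeqNum; // valid when the account seq is in [minSeqNum, seqNum)
+};
+
+// Holding exactly one flavor makes the combination impossible to express,
+// so the XDR alone would prove a transaction never mixes delay and jump.
+using Preconditions =
+    std::variant<std::monostate, DelayPreconditions, JumpPreconditions>;
+```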
+[49:00] What else have we got here? We've got six minutes; now, more questions. I'm sorry, I took over the meeting. I think this is great, because we're getting through these questions with concrete action items. All right, so the next things are around transaction validation. There's a section in the draft that talks about, I think, dual validation: basically, what happens during nomination, and then what happens later, when you actually apply. I think that's what this is really about. So there's a thing that says all but the transaction with the lowest seq num on a given source account must have + +[50:00] zero for the min seq age and min seq ledger gap fields, within a nominated block of transactions, right. And what I'm asking is: should we simplify this to just be something more like the rule that we just discussed for flooding transactions? Basically, that rule is: if you have a min seq condition on the transaction, you can never mix it with any other transaction for the same source account. Instead of having this weird lowest-sequence-number + +[51:00] special case where you can put in two transactions, I don't understand what we gain by having this extra rule. Yeah, I mean, you often might want to do that: you want to bump a sequence number and then execute a bunch of things at that sequence number that are valid. But it sounds like it's complicating things for no real good reason. Well, this isn't just an implementation detail; this one is actually part of the protocol. That's why this one is actually very important, because it's about validity. So, yeah, the question is: could you have various sequences of transactions that you can execute, you know, after some + +[52:00] delay, right? And one reason you might want to do this is: suppose, for some reason, you don't want to use claimable balances, or you're worried about something failing, right? You'd want to break a set of operations up into several different transactions, in case one of them fails, so the sequence number is consumed and you can execute the other ones, right? And so then, if this kind of series of things you want to do has some relative delay, then what you do is: the first in the sequence would have a min seq age, and the others you would just leave alone, because you can only execute them with the right sequence numbers. So, to me, this seems like something you'd want to do, and it doesn't seem like a particularly hard thing to implement. I mean, you're already checking that all the sequence numbers are consecutive, or that they have min seq nums that make sense. So, to just say that, other than the first one, the min seq age also has to be zero, it doesn't seem like a big deal. But + +[53:00] I mean, more conditions, right? Sorry. It is definitely useful, right; so the only reason to remove it would be if it's really hard to implement. You have to also do that in the overlay, right, in the queues: all the queues that are being managed have to be sorted, and yeah. But we already have to do that, because we have to make sure... Number one, it's more that now you have certain conditions between transactions, depending on the order, right? Like what we were talking about earlier: if there's any transaction in the queue, I can't add a transaction with a condition like that, + +[54:00] right? But we already talked about that. Oh, sorry. I'm not particularly offended by what you have, David. Just to provide a counterpoint to Nico: I think there could be merit to pipelining, and, compared to the already very large amount of condition checking that we do, especially in the transaction queue, I don't think this would be a very material change. That's my opinion. If we think that this kind of pipelining situation is not only possible but plausible, and I think it probably is, I don't think it's that bad, personally. At this point we're also out of time. So, okay: are you okay with that, Nico, or are you still not convinced? No, I'd say it's probably fine. It's just more conditions, like I said. Okay. This one, I do think, is useful. So, + +[55:00] okay, cool: leave this one as is, because it's useful and possible.
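+
+The rule being kept here, restated as a small illustrative check; the names are hypothetical and this is a sketch, not the Stellar Core implementation:
+
+```cpp
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <vector>
+
+// Hypothetical shape of a transaction for this sketch.
+struct Tx
+{
+    int64_t seqNum;
+    uint64_t minSeqAge;
+    uint32_t minSeqLedgerGap;
+};
+
+// Validate one source account's transactions in a candidate block:
+// after sorting by seq num, only the first may carry a non-zero
+// min seq age or min seq ledger gap.
+bool validAccountPipeline(std::vector<Tx> txs)
+{
+    std::sort(txs.begin(), txs.end(),
+              [](Tx const& a, Tx const& b) { return a.seqNum < b.seqNum; });
+    for (std::size_t i = 1; i < txs.size(); ++i)
+        if (txs[i].minSeqAge != 0 || txs[i].minSeqLedgerGap != 0)
+            return false; // only the lowest seq num may carry the delay
+    return true;
+}
+```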
Unless, you know, something changes, obviously. I think that perhaps what happens now is that we move this back to the mailing list. There are a few action items for you, David, to make changes, and a few things we still need to discuss async, but they're pretty simple, straightforward decisions to make. And that also gives everyone time: if, you know, Nico, you think about this last point and decide that actually the utility may not warrant the actual work or something, there's time to bring it up. But it feels like these are fairly discrete changes and action items that can actually get this thing in a position where we can evaluate a draft, possibly with an eye towards acceptance, very soon. Does that seem true? Yeah. All right, well, awesome, everybody. Thanks again for being here, thanks to everyone at home for watching, and I'll see you all soon. + +
diff --git a/meetings/2021-10-14.mdx b/meetings/2021-10-14.mdx new file mode 100644 index 0000000000..89ff1a34b0 --- /dev/null +++ b/meetings/2021-10-14.mdx @@ -0,0 +1,141 @@ +--- +title: "AMM Final Guardrails and Issuer Safety" +description: "Final review of CAP-0038’s Automated Market Maker implementation, focusing on last-mile guardrails, issuer safety semantics, error-code alignment, and edge-case handling ahead of the Protocol 18 network upgrade." +authors: + - jonathan-jove + - justin-rice + - nicolas-barry + - siddharth-suresh +tags: [legacy, CAP-38] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session served as a post-implementation validation of CAP-0038, confirming that the final AMM behavior matched protocol intent before the network upgrade. With the CAP already approved and implemented, the discussion focused on reviewing implementation-driven changes to ensure correctness, predictability, and consistency with existing orderbook semantics. + +The group walked through each adjustment made during implementation, paying particular attention to issuer control, withdrawal guarantees, error reporting, and how AMMs interact with trustlines, sponsorship, and path payments. The overarching goal was to ensure AMMs remain safe for issuers and integrators while behaving intuitively for wallets and applications once Protocol 18 is live. + +### Key Topics + +- Alignment with orderbook semantics + - Liquidity pool deposits and withdrawals now mirror offer behavior + - Liquidity providers can always withdraw shares, even when authorization is revoked (maintain-liabilities state) + - Deposits remain blocked when authorization is missing +- Issuer safety and revocation behavior + - Revoking asset authorization predictably unwinds liquidity pool positions + - Pool shares cannot become permanently “stuck,” even under extreme price movement + - Revocations may result in claimable balances rather than direct refunds, ensuring safety in edge cases +- Error codes and result consistency + - Clearer, more accurate result codes for AMM deposits, withdrawals, and path payments + - Fixed inconsistencies where liquidity pool paths could not realistically return legacy orderbook errors + - Added missing validation and malformed results to match protocol conventions +- Trustlines, limits, and reserves + - Correct handling of trustline limits (e.g., line-full cases on deposit) + - Clarified low-reserve and too-many-sponsoring failure cases during revocation flows + - Ensured pool-share bookkeeping behaves safely without introducing new limits risks +- Claimable balance handling + - Simplified claimable balance identifiers by removing unnecessary ID types + - Standardized behavior so revoked pool shares always resolve via claimable balances + - Acknowledged ecosystem implications and the need for wallet support and education +- Future-proofing expectations + - Noted that AMM mechanics and parameters may evolve via future protocol upgrades + - Liquidity pool shares should not be assumed to exist unchanged in perpetuity + +### Resources + +- [CAP-0038: Automated Market Makers](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) + +
+ Video Transcript + +[00:00] Welcome to the Stellar Open Protocol Discussion. In these meetings we discuss Core Advancement Proposals, aka CAPs. These are technical specs that suggest changes to the Stellar protocol necessary to allow that protocol to continue to evolve to meet ecosystem needs. We live stream these meetings so that anyone who is interested can follow along. I do want to note that it's a technical discussion, so if you're watching, you should take a look at the CAP we're planning to discuss, which is linked in the show description. It's [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md). Also, we do keep an eye on the discussion box and your comments there; they do help inform our decisions. That said, today's discussion focuses on reviewing + +[01:00] final changes to a CAP that's already been approved, and our goal is to acknowledge and validate those changes. So, you know, we'll again pay attention to that discussion box, and if relevant questions come in, we may try to incorporate them, but we do have this very specific goal. As I said, today's discussion focuses on a single CAP, [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), and this meeting is basically the end of its life cycle. That life cycle started with the discussion on the Stellar dev mailing list, and there's a link to that mailing list in the event description for anyone interested in joining and participating in future discussions. After that discussion, the CAP was drafted and then iterated on, based on feedback and suggestions that came in, and once it settled, it was approved by the CAP committee and entered a one-week Final Comment Period, and it got through that period. It was implemented in a major Stellar Core release, and it is now officially part of Protocol 18. Now, there's still a final step before that protocol hits the network, which is that validators have to vote to upgrade the network to Protocol 18, and that vote is currently scheduled for November 3rd. Before that day, anyone and everyone who + +[02:00] builds on Stellar needs to install new versions of any Stellar-related software they rely on, including Stellar Core and Horizon, and also including all the Stellar SDKs. There are breaking changes in Protocol 18: if you aren't running up-to-date software when the network upgrades, you will run into trouble. So, if you're watching this and you haven't already, please make sure to update all your Stellar-related software. Okay, enough preamble. Today we're returning to [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md): automated market makers. It introduces a powerful new feature that has the potential to really transform the network, which is the ability to create automated market makers, or AMMs. AMMs allow users to make deposits into liquidity pools, and they also allow buyers and sellers to trade against those pools. They use an underlying formula to value the two assets relative to one another: when trades execute and change the amount of each asset in the pool, the relative prices shift based on the formula. It all happens automatically. And in the case of [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), trading against a liquidity pool is done using existing + +[03:00] path payment operations, which is pretty neat, because it automatically gives applications built on Stellar access to this new source of liquidity.
It also means that liquidity pools benefit from real-world cross-border payments, and Stellar is the first layer-one blockchain to incorporate AMM functionality at the protocol level, and the first to combine it in this way with cross-border payments. Obviously, we're super excited to see what this feature can do to boost overall network liquidity. That said, back to the technical discussion. As I mentioned, at this point [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) has already been approved and implemented, but during implementation there's always a bit of tweaking that needs to happen, and the goal today is to take a look at the tweaks that happened during [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md)'s implementation and verify that the final adjustments are safe and sound, that they make sense. So this is the post-implementation validation phase, and hopefully the final phase for [CAP-38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md), and so what we're going to do is look through the changes that happened in this post-implementation + +[04:00] review. I'm just going to walk through them one by one, but before we do that, does anyone have any questions? We're good? So here we go. The agenda is to walk through the XDR changes, and we're going to do it in approximate chronological order. So, the first change: removed liquidity pool ID from the union in the liquidity pool entry. And so I guess my question is: does anyone have any questions about this, does this change make sense, are there any concerns? So, when we did this, I think Siddharth might have made this change. Yes, Siddharth made this change, but, Sid, do you want to talk about it, or do you want me to? Yeah, I can talk about it. So the reasoning for this is that, + +[05:00] in the original CAP, the liquidity pool ID was under a union, but the ledger key didn't have the liquidity pool ID under a union, which means that every type of liquidity pool entry would need that ID. So you might as well just take it to the level above, instead of specifying it for each new union arm we add in the future. So that was the reason for this. And the general assumption here is that we're probably going to keep referencing things by hash anyway; there's no reason to think that we would change the liquidity pool ID. Well, we might change our hash function, right, if we decide to move to Keccak from, like, SHA-256 or something. Does this make that harder, or is it just going to be a pain anyway? I think it would be a pain no matter what, because we do a lot of hashing in a lot of places; probably it would be sufficiently disruptive that we would end up introducing new ledger keys anyway for that. So + +[06:00] that's my intuition, at least. I see Nico nodding, so I think he probably agrees with that. Okay, I think we're fine, then, if nobody objects. I mean, I don't expect any of these changes to be really objectionable. I just think this is kind of one of those better-safe-than-sorry type of things. That makes sense.
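+
+A rough picture of the hoisting described here, with C++ structs standing in for the XDR; field and type names are approximations for illustration, not the canonical definitions:
+
+```cpp
+#include <array>
+#include <cstdint>
+#include <variant>
+
+using PoolID = std::array<uint8_t, 32>; // a hash identifying the pool
+
+// One arm of the union: constant-product pool parameters and state.
+struct ConstantProductPool
+{
+    int64_t reserveA;
+    int64_t reserveB;
+    int64_t totalPoolShares;
+    int32_t poolSharesTrustLineCount;
+};
+
+// After the change: the ID lives once, above the union, so every present
+// and future pool type shares it instead of re-declaring it per arm.
+struct LiquidityPoolEntry
+{
+    PoolID liquidityPoolID;                 // hoisted out of the union
+    std::variant<ConstantProductPool> body; // union over pool types
+};
+```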
Okay, second change: use ClaimAtom in ManageOffer success results. This one was a change that I made. The main gap here was that, when I originally drew up the CAP, I had kind of assumed, oh, we should make the minimum changes that we would have to make to do this; it would be less annoying for downstream systems and stuff like that. But then there were two realizations. The first realization was: well, then downstream systems need to know how to handle both kinds of claim atoms independently + +[07:00] anyway, so they will end up with code duplication, and they'll need to know which ones to expect in different scenarios, and we'll have to maintain both in Stellar Core. So it's all of the bad and none of the good. The only catch was that it was slightly breaking for people who actually parse XDR results. So... that's a good question, Justin. When I wrote this, I thought the answer was approximately no one; I thought it was basically just Horizon and a few other very specifically written downstream tools, like StellarExpert, that were doing things like that. But it actually turns out that a lot of the SDKs had this kind of result-parsing code for trading operations rolled into them. I don't interact with the SDKs that much, so I wasn't aware of that; perhaps it would have changed the calculus a little bit on this. But, having + +[08:00] already done the work, at this point my expectation is basically that anybody who's using one of those SDKs and interacts with trading operations probably needs to update prior to the vote, so the SDK itself can handle the XDR parsing. And those people will need to upgrade their SDK. Exactly, and that will generally solve the problem for people that don't have some custom XDR parser, which, it seems, is not a ton of downstream systems, since most people rely on SDKs. This sounds like a good precedent to set: if you're parsing XDR, keep up with the protocol. The whole point of XDR is that it's not hard to do the upgrade; you just need to update your XDR files. I like that precedent. It's official; it's the official precedent now. + +[09:00] Okay, any other questions about that change? Looks like the next two are: added new results to revoke sponsorship, malformed, and changed validation results for revoke sponsorship to all be malformed. Do you want to take this, or should I? You made one of the changes, I made the other. You can go ahead. Okay, so basically this change is a bit of a throwback, actually, to, I guess it was Protocol 15 or 14, when there was a side issue with the release and we had to make some retroactive changes, and that became Protocol 15. And when we did that, we didn't want to change the XDR, because that would have required us to basically force everybody to update even more than they had to, like all their clients and + +[10:00] everything like that; we elected not to do that. And so, basically, there were validation results for revoke sponsorship that didn't return malformed, which is a violation of our convention, even though it doesn't matter in the grand scheme of things. So, these two changes: one adds the malformed result that was missing, and the second one just retroactively applies it to all the things that were not correct as of Protocol 15. It should have zero impact, basically. Okay, anyone have any thoughts, questions? All right, the next one is: changed to int32 in liquidity pool use count. + +[11:00] Yeah, I can take this. This is following our convention of using signed ints in the XDR, and this number will never get close to even INT32_MAX, so this just made sense. David, you're muted, if you're trying to talk. Why not? It's not inconceivable to have two billion trust lines, and so it makes me pretty nervous that you changed the signedness but none of the actual range checks seem to have changed. So, the reason is: an account can have a trust line, right, but when it
wants to deposit into a pool, it has to create, you know, a pool share trust line. So you can't have more than, I don't know, I'm not sure what the exact + +[12:00] number is, but you can't have more than a couple hundred pool share trust lines, right. Why not? You could have a billion accounts, or three billion accounts, that all own shares in the same pool, couldn't you? Each of those accounts will have its own trust lines, right; this is on the trust lines, like the pool share trust lines, not on the pool itself, so it's a per-account counter. Yeah, sorry, okay. Maybe say what this counts: what is the use count of a trust line, what does this thing actually count? So, we have this requirement where you can't delete an asset trust line until the pool share trust line is deleted, right, and this enforces that. Sorry, that totally makes sense. Yes, this change is innocuous. + +[13:00] Yeah, I completely misunderstood, sorry about that. No problem; I feel like that's what we're here for: just making sure these things are clear and that there's not something we're overlooking. Okay, the next one is: added a new liquidity pool deposit result, which is line full. This was just a small bug in the XDR: the pseudocode already considered the case where you could get line full upon deposit, which, let me just look at this for one second to make sure I'm about to say the right thing. Oh, sorry, this is, yes, line full. It was already possible that you would have the maximum number of pool shares that you could hold, relative to your limit, and then when you deposit... wait, let me read this once more. Yes, this is + +[14:00] deposit, sorry about the little confusion. You already have the maximum number of pool shares that you're willing to hold, you deposit, you can't hold the pool shares that you would have received, so you get line full. It was already handled in the pseudocode and missing from the XDR, so this is just a correction. Again, that just seems to implement expected behavior, as far as I can tell. I mean, well, I think there's a more general question: I feel like at some point we may want to revise what the use of the limit on trust lines is. Let's not go down that rabbit hole. I totally agree with you on both sentiments: that we probably need to talk about that, and also that we probably don't want to talk about it right now. Nico and I have been talking about it a bit, actually, and it turns out that there are some tricky details when you try to change how this works, + +[15:00] plus line limits; we'll get back to that. Next: make liquidity pool deposit and withdraw semantics match offers. Yeah, this was just a design flaw in the original thing. You know, our goal has been to make liquidity pools work exactly analogously to offers in as many ways as possible, and this was just a gap there, where basically we were ignoring the fact that there's an asymmetry in the auth semantics for offers, where you can delete an offer even if you're not fully authorized, but you can't create one when you're not fully authorized. + +[16:00] But for liquidity pools, we were just saying: if you're not fully authorized, you can't do anything, and that doesn't quite make sense. So I believe the biggest change is that you can withdraw if you're in the authorized-to-maintain-liabilities state, right; it's the same thing as pulling offers. And, I'm not sure if this is specified here, but if you're in the unauthorized state, your pool shares are redeemed automatically, right, so you can't be in an unauthorized state with pool shares. And you're allowed to pull pool shares if you're in the authorized-to-maintain-liabilities state, but you can't deposit, which I think was the old behavior as well: you can't deposit if you're not + +[17:00] authorized. So, just to check my understanding: this means that, for example, if I am the issuer of an auth-required asset and I don't want there to be a market between my asset and, say, asset X, I could basically revoke authorization from everyone who's contributed to the liquidity pool, and that would just automatically kill the liquidity pool. Is that right? Okay, yeah, and that's the intention; that's the intended behavior. Great.
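+
+The semantics just confirmed, condensed into a hypothetical sketch; the enum mirrors the trust line authorization states, and the two predicates restate the rules from the discussion above:
+
+```cpp
+// Trust line authorization states, as discussed above.
+enum class AuthState
+{
+    Unauthorized, // pool shares are redeemed automatically in this state
+    AuthorizedToMaintainLiabilities,
+    Authorized
+};
+
+// Mirrors offers: an existing position can be unwound without full auth...
+bool canWithdrawPoolShares(AuthState s)
+{
+    return s == AuthState::Authorized ||
+           s == AuthState::AuthorizedToMaintainLiabilities;
+}
+
+// ...but creating new exposure requires full authorization.
+bool canDepositToPool(AuthState s)
+{
+    return s == AuthState::Authorized;
+}
+```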
Next is: added new results to allow trust and set trust line flags, which is low reserve, and clarify what happens with a zero amount. + +[18:00] Yeah, one sec, let me see; sorry, I need to take a look at this change. So, I believe this change is specifying... originally, in the original implementation, we assumed that the revoke was guaranteed to work, well, that the redeem from the pool share trust line was guaranteed to work when you revoke auth. But there are some cases where this isn't true: specifically, if the account that needs to sponsor the claimable balance is itself in a sponsorship sandwich, we can't guarantee the state of the sponsor, so it's possible to hit the low reserve or too-many-sponsoring error codes. So this is adding that. And there might be a second change here. Yeah, the other change is that + +[19:00] it's not always guaranteed that a claimable balance will be created on revoke: for example, if the claimant would be the issuer, or if the amount being withdrawn is zero for that asset. So this just specifies those cases. I'm actually confused; I didn't understand what this does. How do claimable balances come into play here? So, when you revoke auth on a trust line, and that trust line had deposited into a liquidity pool, right, the way this works is that those pool shares will be withdrawn, redeemed automatically, and then, + +[20:00] well, we can't just... originally we wanted to send it back to the trust line, right, but we can't always do that, because, you know, the trust line might be full. So the solution here is to just create a claimable balance for that asset, and then the owner of that trust line can come in and claim it whenever they want to. And the reserve for the claimable balance comes from the previous trust line for the pool shares. Exactly, yeah. But it seems like a little bit of a foot gun, though, because now this is a case that's not going to happen very often, and when it does happen, probably a lot of wallets and other software are not going to do the right thing, or you're just not going to notice it. Are you talking about managing claimable balances? Well, sorry, this is a situation where usually you just get the funds back, and in weird edge cases you get a claimable balance instead. + +[21:00] Sorry, I wasn't clear: we create a claimable balance every time; we don't even attempt to send it back. Oh, I see, okay. Yeah, that was the change. Okay, so that's actually good, so it's uniform behavior. It's a little bit more complicated than just refunding it, but at least it doesn't have this edge case.
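+
+Sketched out, the uniform rule just described might look like this; the types and names are hypothetical, and the logic is a simplification of the actual revocation flow:
+
+```cpp
+#include <cstdint>
+#include <optional>
+#include <string>
+
+struct Asset { std::string code; };
+struct ClaimableBalance { Asset asset; int64_t amount; };
+
+// On auth revocation, pool shares are redeemed and each redeemed asset
+// always resolves to a claimable balance; the trust line is never tried.
+std::optional<ClaimableBalance>
+redeemOnRevoke(Asset const& asset, int64_t amount, bool claimantIsIssuer)
+{
+    // The carve-outs mentioned above: no claimable balance is created when
+    // the claimant would be the issuer or nothing was actually withdrawn.
+    if (claimantIsIssuer || amount == 0)
+        return std::nullopt;
+    // The reserve for this entry is repurposed from the pool share trust
+    // line that is being removed.
+    return ClaimableBalance{asset, amount};
+}
+```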
Yeah, and we acknowledge that this is really quirky. Siddharth and I spent many hours on the phone trying to figure out if there was a way to not do this, and it turns out that basically the crux of the issue here is: you can avoid this issue when you have offers involved, because offers have fixed liabilities, and they never grow, or they only grow upon a manage offer operation that you can control. But here, imagine I deposit into a liquidity pool, and somebody immediately trades it to some crazy price, and now you can't withdraw. So it's very easy to get into an unwithdrawable state. It would have been great if we knew how to avoid this. + +[22:00] Is this going to be mysterious to developers, that they're getting these claimable balances instead of direct transfers of funds? I would think it will be kind of mysterious. I think there could be... I mean, if we were starting from scratch, I feel like a better design would be to say you can't have more than, you know, two to the 63 of any asset, and then either there's only a trust line or we get rid of limits on trust lines, or whatever. But, given where we're starting, maybe this is reasonable; probably the least bad situation. A lot of weird, quirky things, it turns out, could have been avoided with asset issuance limits, like David was saying; it could have helped in a whole bunch of different avenues, actually, but that's not the world we reside in. Maybe in the future it will be. + +[23:00] And, by the way, I looked at the ledger, I think yesterday, and at this point thousands of accounts have issued more than INT64_MAX of some asset. It's actually a thing that people do now. It was in the tens the last time I checked, which I think was in March or April: accounts issuing tons of assets, basically. And now it's thousands, you're saying. Yeah. Maybe we should say there should be a flag on the asset, saying whether you can create more than two to the 63 of them, and then you get access to all these good features only when that flag is set, or something. But anyway, we are where we are. So I'm just wondering: when I look at this, I'm like, oh shoot, does this imply + +[24:00] that there's some sort of education, clarification, outreach that needs to happen, so that people understand this quirk? I mean, one thing we could do, and this is not really a protocol discussion, but I agree that education is a part of what we do, and so it's not necessarily something we should ignore: it would not be unreasonable to create a tiny JavaScript program or Python program, or whatever, that uses one of the SDKs, and basically what it does is it connects to the testnet, you point it at a wallet, and it goes through the steps of making this happen. And basically we can give it to wallet developers as a tool. It's like: hey, we want to make sure you handle this; it's something that can happen; run this program, and if your wallet works, you're good to go; if it doesn't, you have a bug. The other thing I'll say is that the claimable balances you receive will look exactly like claimable balances you would receive in any other setting, so wallets that handle those will probably do the right thing, even if they might not reveal it to users in the + +[25:00] most sensible way. Yeah, I guess the question here is more: what's the level of support for claimable balances in the ecosystem today?
I think it's much better than even two months ago, but I don't know where we are. Yeah, I feel like a few months ago there was zero support, and now there's increasing support, because people understand what claimable balances are. I mean, unlike this time, right, where we're spending time on an adoption plan, we didn't really have a clear... and this is not related to this discussion, but I was just saying, we didn't have a clear adoption plan for claimable balances, so it took a little longer for people to catch on. But, you know, maybe now claimable balance support gets folded into the education about liquidity pools, given that this is a fact. + +[26:00] Okay, well, that's very helpful for me. I know it's not strictly protocol discussion, but thank you. And I believe we are where we landed: this is what it is, this is the world we live in. Okay, all right. Then, next: added missing validation conditions for liquidity pool deposit. This was another bug fix, but a pretty bad bug. Basically, it would have been possible to submit very nonsensical operations, where you could just say, hey, my min price is bigger than my max price, so it's impossible to + +[27:00] succeed, but it would still actually go through: you'd make it through validation, you'd actually get to apply time, you'd actually do some of the apply work, only to find out quite a way through that you would fail unconditionally. So, just a bug fix that makes things sane. Great. Fixed incorrect error specification for path payments. Oh yeah, I remember this one. I would not be surprised if people have things to say about this one, just because it was very hard to express what actually happens. The original draft of this, the one that we accepted on, like, June 23rd or something like that, + +[28:00] basically what it said is: if you make a path payment and it fails because of stuff happening in the liquidity pools, you would get the exact same result as if the liquidity pools had never existed at all. But it doesn't take a lot of thinking to realize this is actually probably not possible. Because, for example, with the original language: imagine in the first hop you're going from A to B, and you have both a liquidity pool and an orderbook, and you can do it through either path, but then on the second hop it fails because of something to do with the liquidity pools. And then it's like: well, actually, if, in the first hop, I had taken the path through the liquidity pool, I'm not necessarily going to end up in the same state as if I had taken the path through the other side, and then I'm not necessarily going to get the same error code regardless. So things got pretty hairy, + +[29:00] and it was all about, not even designing, defining a set of rules that we could actually follow, to make it predictable what kind of error codes you would get. I don't know if we need to get into the details of how it actually does that, because, from any user's perspective, they're just error codes, but it was all about making a specification that we could actually follow and check that we implemented correctly in Stellar Core. We learned a lot while dealing with this problem, though; in that sense, it
was really enlightening to try to understand this and to figure out how to write the right spec. But I don't know if it's very enlightening to get into the weeds about how it works. I'm just curious: can you give an example of how a liquidity pool can make a path payment fail? Or are you saying it would have failed without the liquidity pool, and it also fails with the liquidity pool, but it therefore should give the same error + +[30:00] message as if you didn't have the liquidity pool? Just an example would help here. Yes, so there is an example here, but I can definitely give you an example that might be more pertinent. Well, actually, you know what, I'm going to mess it up if I try to do it off the top of my head. Let me look at the example that I wrote here and just walk through that. Unfortunately, it doesn't give the example of the weird thing happening, so you've put me on the spot, but I'm going to try to produce an example for you. Okay, here's an example: you have no orderbook at all. In this case it's a single hop, but you do have a liquidity pool. The liquidity pool + +[31:00] has some liquidity in it, but not enough to give you a good enough price. When you say no orderbook: so the path payment is from A to B, there are two assets involved, and there are currently no traditional offers? Exactly. There is a liquidity pool, it's very small, and so there's a ton of slippage when you trade against it, and as a consequence you can do the conversion you want to do, but you don't get a good price. And so, in the old world, if you only went through the traditional orderbook, you would have gotten too-few-offers. But now you go through the liquidity pool side, you actually can do the conversion, so too-few-offers is not a factor, but then you get to the end and you don't satisfy the price constraints on the path payment. And so the original language said: well, I should have returned too-few-offers. But that's obviously not something that I could do: in a long multi-hop payment, it might + +[32:00] not even be possible for me to figure out what I should have returned without having to go and rerun it all in the absence of liquidity pools, which is obviously super inefficient for no gain. So, yeah. Okay, and so, to be totally clear: there's no such thing as a path payment that would have succeeded except for the liquidity pool? There's no way the liquidity pool could make something fail that would otherwise have succeeded, right? I think that is correct, yes. Okay, because you always get better prices with the liquidity pool; it's another option. Okay. I do believe that is correct, yeah. I think the first time I said it, I said something that would have implied that wasn't true, but that's right. I think that's where I was getting confused. So, basically, now you won't get too-few-offers, because that doesn't make sense, because it's a liquidity pool, not an orderbook, right; you get some other error. Well, it is still possible to get too-few-offers, actually, even in the case + +[33:00] of the liquidity pool. This can only happen, I don't remember for which, I think this can only happen for path payment strict receive. Imagine that you have a liquidity pool that has a hundred dollars in it and 115 euros in it. That's not exactly the exchange rate, but it's pretty close, so let's pretend. And then I send a path payment strict receive where the destination receives two hundred dollars and I'm trying to send
+
+[35:00] Yeah, I can take this. So, I mentioned this earlier, but in a previous line that we just reviewed, we added a low reserve error, which applies on revoke if the account that is going to sponsor the claimable balance isn't in a sponsorship sandwich. This is essentially adding another error code: it's possible to get "too many sponsoring" as well, if the account doing the sponsoring is already sponsoring too many entries. So yeah, that's it. So we have a counter that says how many entries you are currently sponsoring,
+
+[36:00] And that has a limit- I believe it's int32 max, or uint32 max. Most importantly, it's limited by your lumen balance, isn't it? Yeah. All right, but you can theoretically get to that amount, I see, if you have a lot of that. Yeah- if I should be a little more accurate, the limit is actually on num sponsoring plus num sub entries, which is a detail that allows the revoke to work, guaranteed, if you're not in a sponsorship sandwich- which is a detail in the CAP. But in this case it's a little different, which is why we need to check for that error code. The reality is exactly what you say, David, though: the only limit that we ever expect anybody to hit is the sub entry limit. All the other limits exist purely because we have to do something that has to be well defined, but the circumstances under which you would
+
+[37:00] Have to be, to hit those limits, are so unusual- you'd have to have, like, three billion base reserves or something. Exactly- one and a half billion lumens. It's possible, but why would you be in that situation? Why would you be sponsoring so much stuff? It's just weird. So- and then we interview the person who got to that state- well, there are at most, like, 20 people or something who can do that, right, if there's 50 billion lumens available. So, all right.
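+
+A small sketch of the counters and cap being discussed; the constant and field names are illustrative rather than stellar-core's actual definitions:
+
+```typescript
+// The sponsoring counter is capped together with subentries, which is the
+// detail that guarantees a revoke can succeed when the account is not in a
+// sponsorship sandwich.
+const MAX_ENTRIES = 2 ** 31 - 1; // int32 max, per the discussion
+
+interface AccountCounters {
+  numSponsoring: number; // ledger entries this account currently sponsors
+  numSubEntries: number; // trustlines, offers, signers, data entries
+}
+
+// Whether the account may take on one more sponsored entry; failing this
+// check is what surfaces as a "too many sponsoring"-style error code.
+function canSponsorOneMore(acct: AccountCounters): boolean {
+  return acct.numSponsoring + acct.numSubEntries < MAX_ENTRIES;
+}
+```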
The final change: removed new claimable balance ID type. Yeah, so I can take this as well. So, in the original CAP iteration, we added a new claimable balance ID type, and also the balance ID is hashed off of a
+
+[38:00] Different object, essentially- a different union in a struct- and this is actually unnecessary, because the hash already has a different type in it. So why would we, at the higher level, also include a different kind of balance ID type when it's not necessary? So we just removed the new claimable balance ID type- because we still have the new type in the object that we hash. Yeah, I think originally we thought that maybe by having this extra type we could basically derive where this came from, but then we realized, well, actually, you already have to go to the transaction that created the claimable balance anyway, so
+
+[39:00] This was actually kind of useless. I mean, for existing claimable balances you already have to go through this process, so it was a shortcut that was not actually doing anything useful. Well, that's all the changes that happened between acceptance and now, and I feel like I definitely learned something about some of the choices that we made and the implications that they have- really outside of the protocol, mainly in terms of what we can be telling people. But basically it doesn't seem like there's any work that needs to be done to revise or undo any of those changes. Does that seem correct? Yeah- I have,
+
+[40:00] I mean, maybe it's just too late for this, but the one thing that would be nice- it doesn't say anything about what happens if, suppose, 0.3 turns out to be the wrong number or something. It would be nice to say somewhere, just in terms of setting expectations, that future protocols are allowed to completely cancel your liquidity pool shares, or refund them to you, or something. Basically, I want to avoid, the next time we revise this, being in a situation where, because people are using the previous AMM and we didn't give them enough warning, we're reluctant to make changes. So I'm wondering if we could just add one sentence to the future work section, like: you can't count on your shares continuing to trade in perpetuity if we upgrade the protocol, or something. That's a good point, because that's also
+
+[41:00] Kind of- we have a similar expectation with the orderbook today, that it can basically disappear any day, so people should not rely on that in smart contracts or things like that, right. And we did do that with the orderbook once, and it was fine, and I could imagine needing to do the same thing in this context. I think that's a really good insight, David. So somebody just writes, essentially, a sentence that explains this: your liquidity pool shares may get cancelled, and then you may have to claim the claimable balances and reissue, if we upgrade the AMM algorithms or parameters that are in use. Got it- which, again, just for anyone listening, would require a protocol upgrade, which would require the consent of validators. Yes, it's not a decision- well, that would be a CAP, actually- it's a new CAP, too.
+
+[42:00] Yeah, and a new CAP, right, so there would be a whole process that would lead to that. It would be out in the open, and it would go through this whole development life cycle. So I guess, then, there is an additive request, which makes sense, and someone's going to take that, but other than that, we feel that these changes- I mean, am I okay voting for this while authorizing one sentence to be inserted before it's finalized? I don't want to drag this into another protocol meeting- it's a very minor thing that doesn't matter for today.
So- and if we didn't do this, I would still be okay with it; I just think it would be nice to do. Yeah, it sounds like everyone agrees, right? I'm gonna go and do it when we get off the phone, probably. "Get off the phone"- so old school. Thank you, John. Okay, cool. Is there anything else that is pressing that anyone wants to bring up? If not, I think we're done
+
+[43:00] For today. Were there any questions in the chat before we hang up? I don't know- everything was- so, congratulations, everybody, we did it. This is a big deal. Yeah, and so there we go: it has reached the end of its development life cycle, and now it is ready to go out into the world. I mean, it's already on the testnet in Protocol 18, and soon it will be voted on by public network validators, which is pretty exciting. So great work, everybody. I'll see you all soon. Thanks, everyone. Thank you. Thanks.
+
diff --git a/meetings/2021-10-28.mdx b/meetings/2021-10-28.mdx new file mode 100644 index 0000000000..20bc7a819e --- /dev/null +++ b/meetings/2021-10-28.mdx @@ -0,0 +1,110 @@ +--- +title: "Payment Channel Readiness for CAP-0021" +description: "Discussion of the final implementation details and readiness of CAP-0021 (generalized transaction preconditions), including updated rules for handling incompatible transactions, expected Core/Horizon work, and how this unlocks payment-channel patterns, account recovery flows, and high-throughput apps." +authors: + - david-mazieres + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: + - legacy + - CAP-21 + - CAP-40 + - SEP-8 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This call served as the final readiness check for CAP-0021, focusing on whether any remaining technical questions would block sending it to the CAP committee. The group walked through the last set of spec tweaks, clarified how nodes should handle incompatible transactions, and compared those tradeoffs with existing “first-seen” behavior already present in other edge cases. + +The discussion then shifted to practical rollout considerations: what changes are needed in Stellar Core and Horizon, what testing and overlay work might be the riskiest, and how much effort SDK updates are likely to require. The group also covered why it’s valuable to land these capabilities before they’re urgently needed, since payment-channel primitives can enable high-volume apps as well as new account recovery patterns. + +### Key Topics + +- Final spec tweaks + - Reverted time points/durations back to unsigned (signed was deemed a bad idea and not worth optimizing for database convenience) + - Added clearer notes on how bump-sequence and related behavior can invalidate transactions within the same ledger +- Handling incompatible transactions + - Updated approach: keep the first incompatible transaction seen and discard later ones that can’t coexist in the same ledger + - Rationale: consensus and propagation already require dealing with divergent “seen order” cases (e.g., same sequence/fee arriving at different nodes) +- Implementation readiness and scope + - Stellar Core work includes overlay-related changes plus solid test coverage + - Horizon likely needs transaction-queue adjustments to accommodate min-sequence-number behavior + - SDK updates were described as relatively straightforward compared with larger past envelope changes +- Payment-channel viability and patterns + - Experiments indicate CAP-0021 enables safe, predictable payment channels, including flows involving issued assets and trustlines + - Regulated-asset considerations (e.g., SEP-8 style constraints) appear compatible with the channel designs explored + - Performance exploration continues (including “single-message channel” ideas), but isn’t viewed as a blocker for acceptance +- Prioritization and “insurance policy” framing + - Landing the capability earlier reduces risk if a high-throughput use case arrives suddenly + - Also enables otherwise difficult patterns (notably account recovery-style flows) without complex workarounds + +### Resources + +- [CAP-0021 (generalized transaction preconditions)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) +- [CAP-0040](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) +- [SEP-8](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0008.md) + +
+ Video Transcript
+
+[00:00] Hi everyone, welcome to the Stellar Open Protocol Discussion. In these meetings, as always, we discuss Core Advancement Proposals, CAPs, which are technical specs that suggest changes to the Stellar protocol, and they're necessary to allow that protocol to continue to evolve to meet ecosystem needs. We live stream the meetings so that you can follow along. I do want to note it's a technical discussion, so if you're watching, please take a look at the CAP linked in the show description: CAP 21. We do keep an eye on the discussion box, and your comments there inform future decisions, but our goal today is to talk through some final questions about CAP 21, so we'll only really address questions here if they're directly related to that goal. Once again, this meeting is part of a
+
+[01:00] CAP's life cycle. CAPs are started on the Stellar dev mailing list, and there's a link to that mailing list for anyone who wants to participate in future discussions. They're drafted and iterated on before they end up in this discussion, in this meeting, and based on what happens in this meeting, they may get put up for a vote before the CAP committee, which decides whether to accept or reject a CAP. After a vote, the CAP enters a week-long Final Comment Period, so everyone has a last chance to raise questions on that same mailing list. If a CAP makes it through the Final Comment Period, it's implemented in a major release of Stellar Core, which is then put forward for network validators to vote on. Ultimately, at the end of the process, it is the network that decides whether to accept major protocol changes. On that note, there is currently a validator vote to upgrade the network to Protocol 18, scheduled for November 3rd- that's next week- and that upgrade would introduce automated market maker functionality to Stellar, so it's very exciting. It also introduces some breaking changes,
+
+[02:00] So if you build on Stellar, please make sure to install up-to-date versions of any Stellar SDKs you use, along with Stellar Core and Horizon if you run a node. But today we're discussing a different CAP, destined potentially for a different protocol: CAP 21. CAP 21 lays the groundwork for building payment channels on Stellar. Payment channels allow multiple parties to transact off-chain and periodically settle on chain, and, among other things, they make it a lot easier to build high volume use cases on Stellar. So CAP 21 makes technical changes geared towards enabling payment channels, which are great for scaling network throughput. That's kind of my overview of where we are with CAP 21. We've discussed it a few times; David made revisions recently based on the last discussion that we had, and so I think where we're at now is: are there any questions, lingering issues, anything that we can go over with CAP 21-
+
+[03:00] Especially anything that's preventing it from being put forward to the CAP committee for approval? So, anyone got anything? I'm happy to summarize the changes. Awesome, perfect. Pretty minor. So there was this thing where we said that time points should be signed because it's more convenient for some databases- some databases only have signed 64-bit numbers- but then we decided actually that was a bad idea. It's mostly kind of orthogonal to this proposal; it's just that, since we're making use of time points and durations and stuff, that's reverted to unsigned.
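+
+For reference, a TypeScript-style sketch of the precondition vocabulary used throughout this discussion; the real definitions are XDR in CAP-0021, so this shape and the example values are purely illustrative:
+
+```typescript
+// Illustrative shape of CAP-0021-style transaction preconditions.
+interface Preconditions {
+  timeBounds?: { minTime: bigint; maxTime: bigint };       // unsigned time points
+  ledgerBounds?: { minLedger: number; maxLedger: number }; // ledger range
+  // Relaxed sequence check: valid while the source account's sequence
+  // number is in [minSeqNum, tx.seqNum), not only exactly tx.seqNum - 1.
+  minSeqNum?: bigint;
+  minSeqAge?: bigint;       // unsigned duration since the seqnum last changed
+  minSeqLedgerGap?: number; // ledgers that must close since the seqnum changed
+  extraSigners?: string[];  // additional signers the transaction must carry
+}
+
+// Purely illustrative example: a transaction that only becomes valid an
+// hour and at least two ledgers after the account's seqnum last changed.
+const example: Preconditions = {
+  minSeqNum: 0n,
+  minSeqAge: 3600n,
+  minSeqLedgerGap: 2,
+};
+```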
We'll go over the small changes. Then people wanted a note about the bump sequence- how that can invalidate a transaction in the same block. The main change is that basically, when you see two
+
+[04:00] Transactions that are incompatible- they can't both be in the same block because, let's say, one has sequence number 12 and a min seq ledger gap of two, and the other has sequence number 11- you know, previously, I can't remember what I said- you took the one with the highest or lowest sequence number, something. Now you just take the first one that you saw. And the reason I didn't do that in the first place is because I was worried about: well, now you're going to see a block and you're going to end up having to fetch transactions that you don't have. It turns out you have to do that anyway, because this kind of stuff can come up in other contexts- like if you have two transactions that have the same sequence number and the same fee, and they're flooded at the same time to different nodes. Well, half the nodes might see one and half might see another, and so we're just doing the same thing: you keep whichever you saw first and you
+
+[05:00] Discard anything you receive later that can't exist in the same block as the first one you received. David, is that what we discussed before? Because I thought that was actually taking the lowest. I mean, because I thought this was a potential way to artificially delay forever. That's right- you'd think the lowest of the pre-signed transactions is right, but a bad actor can always submit the lower one, right. So maybe it's- no, it doesn't matter, you just take the first one you see. So if something's been flooded and the bad actor floods something incompatible, the validators won't accept that, or they won't forward it. Well, it's more like you have to be first to submit your transaction, right, which
+
+[06:00] Is kind of annoying from a security standpoint. I think we discussed that- I'm not exactly sure if it was relevant to the protocol that is described in the CAP. Yeah, because in some protocols you have many sequence numbers, and the reason you have this gap is it allows you to kind of jump forward. So I can basically delay that transaction in an arbitrary fashion if I have access to those pre-signed transactions. I don't remember, actually- I would need to go back to the protocol that is described in the CAP. I think maybe it doesn't matter in this situation. Can people remember?
+
+[07:00] I remember the problem with the old one was that, yeah, you could keep submitting something that would bump the age or whatever, and so now it doesn't matter- it's just whichever one you- there's nothing, I mean, I guess there's potentially a continued race, but there's nothing that inherently guarantees that the attacker can win the race. The disclosure- yeah, I'm looking at the protocol that's described in the CAP. So, the disclosure transaction- is it using the min seq
+
+[08:00] Ranges at all now? Yeah, so basically you can. And then the second one uses min seq num.
So it's only the min seq age that would be affected, and I think we don't care in this context, because it depends directly on the- yeah, the disclosure is not impacted by this flood-first type of thing, right, because it doesn't have that min seq age condition. Okay, so then, I think, yeah- now I remember- for this particular protocol, it's fine to take an arbitrary one. Yeah, so I mean, it doesn't really come up in this protocol, because all of the disclosure transactions are compatible- they could all- yeah, exactly- you could run them all in the same block. Yeah.
+
+[09:00] So does that mean that the way that things are written right now in CAP 21- this take-the-first-one-you-see approach- seems to work for people? Cool, great. Yeah, it's also simpler to implement. Yeah, I think we actually discussed some of that last time, which was: it's not perfect, but in a way that other situations already aren't perfect, like two transactions with the same sequence number and fee. So it doesn't make things worse. When you say the first one you see, that means the one submitted first in real time? Yes, but of course- I mean, of course, before consensus, different validators will see things in different order, right, as things propagate through the network.
+
+[10:00] But I think we already accept the fact that you may need to kind of refetch the block, you know, while it's being nominated.
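+
+A minimal sketch of the take-the-first-one-you-see rule, assuming a simplified pending set and a placeholder conflict predicate; none of this is the actual stellar-core overlay code:
+
+```typescript
+interface PendingTx {
+  hash: string;
+  sourceAccount: string;
+  seqNum: bigint;
+}
+
+// Placeholder: the real predicate asks whether two transactions' sequence
+// numbers and preconditions allow them to apply in the same ledger. Same
+// source and same sequence number is one case that clearly cannot.
+function conflicts(a: PendingTx, b: PendingTx): boolean {
+  return a.sourceAccount === b.sourceAccount && a.seqNum === b.seqNum;
+}
+
+// First-seen wins: a later arrival that cannot coexist in the same ledger
+// with something already held is simply discarded, not flooded further.
+function onReceive(pending: PendingTx[], incoming: PendingTx): PendingTx[] {
+  const clash = pending.some((tx) => conflicts(tx, incoming));
+  return clash ? pending : [...pending, incoming];
+}
+```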
So, David, are there other changes that we should walk through? That's it. Wow. John, I didn't have any issues- I mean, there's some typos and stuff, but I don't know if they're worth going through. Leigh, I remember that you were doing some experiments on the kind of performance you can get out of this. I remember you were considering different kinds of cases and different structures of payment channels- synchronous and asynchronous and all these other kinds of things. Do you want to give us some context on what the outcome of all of that was, and what the limitations of this proposal are in the land of what you were experimenting on?
+
+[11:00] It's very clear to me that this produces payment channels and they work, but, having not done any of the experiments myself, it's not clear to me what the limitations are- like, what's the best payment channel you can build here, in terms of all these other things? Yeah, so I think there's a couple of things here. So we've been doing experiments to validate: does this produce a payment channel that's safe? Can you build a payment channel with this that's predictable, that interacts with things like issued assets? And what sort of performance can you get out of it? I think the performance thing is something that we're still experimenting with, and it's still something that we're figuring out. In terms of everything else, what we've seen-
+
+[12:00] Is this better? Okay, yes- so what we're seeing is that it works really well with issued assets, and we've written up a thorough sort of description about how you would implement this so that it's safe with trustlines and can be predictable with trustlines, and we have ideas as well for how this would work with regulated assets- things like the assets that use ecosystem standards like SEP 8- and it looks like it fits those use cases well as well. So, in terms of the experimentation we've done to validate "does CAP 21 enable payment channels sufficiently?", I think the answer is yes, it does. In terms of what problems we haven't solved yet- like you
+
+[13:00] Mentioned, you know, asynchronous payment channels, and we have CAP 40, that we've discussed previously, which is sort of being batched together with CAP 21. We can do single-message payment channels, which is really interesting, I think: I can send you a message, and that's all that has to happen for you to receive everything you need to execute that payment- there doesn't actually have to be any further communication. But we haven't figured out everything for how to implement that type of thing. So I think there is still more experimentation to be done. With those experiments- does it make sense to wait to accept a CAP like this pending the results of those experiments, because they might affect the CAP itself, or not?
+
+[14:00] Is that question directed at me? I think so. Basically, it sounds like there are more experiments that you want to do to sort of verify or find out certain things about the way that these payment channels that are enabled by CAP 21 and CAP 40 would perform, correct? So, I mean, we could experiment with this forever, and there are always aspects of this that we can improve. I think what we've proven so far, and what we've coupled with CAP 40 into CAP 21, is at a point where I don't think we need to do more experimentation with it to try actually using it in some use cases. So, yeah, I don't think this needs to be blocked on more experimentation. Great.
+
+[15:00] Yeah, I think at this point we're probably not going to be blocked on the actual CAP itself from a technical standpoint- I think it's more a matter of prioritization. And that is: do we want to accept this now? Because, typically, we don't want to accept a CAP that is going to linger for a year or whatever. So I think what I would like to see is either- and that's, you know, appealing to the ecosystem- people coming forward if they are really interested in this, or we're going to have to identify somebody in the ecosystem ourselves.
+
+[16:00] So that would be my- and that's, I think, going to be kind of the next step for this one. I mean, I think in terms of timing, you need to kind of accept the CAP sometimes in order to attract the users, right? And if you look at- obviously we've had things like multiplexed accounts, where it's been a long road, but it seems like there are valid use cases, and so people are gonna be quite happy to have it in the long run.
I think it's definitely the same thing in this case, because it fundamentally adds new capabilities that people haven't been thinking of. In addition to payment channels, right, this also allows new forms of account recovery and things like that. But the thing about this CAP is that it could be that we need it in a big hurry, right? It could be that it turns out some new use case comes along that is super popular and requires fairly high
+
+[17:00] Transactional throughput, and once something is in core, people can update their applications to work on it much faster. But the cycle is such that if some amazing use case came along and then had kind of explosive growth that required payment channels, that project would basically die if it had to wait for this to go through the Stellar Core release process. So I think we should get a version of this deployed into core- and if we have to revise it, you know, it's a union, right, we can have a subsequent version of this- but it's better to have something out there now. The things that you have to do to compensate for not having this range from "impossible" on down. Like, the account recovery stuff is basically almost impossible- you need a huge number of extra accounts to each have their own
+
+[18:00] Sequence number and stuff. So I have a question, Nico, which is: how do you estimate the cost of implementing this? Because obviously there's tension here- you want to see that there are ecosystem use cases, but, like David says, if a monster use case shows up and the network is not ready in time, is there an opportunity cost here? I mean, yeah, it's like everything else. I think, most likely, from what it looks like, the changes that are going to be the more complicated things as part of that CAP are probably going to be around the overlay type of changes. That's what it looks like to me. The latest change should make it simpler, right- well, you still have to do it in a safe and efficient way, and so on. That's what it looks like,
+
+[19:00] Especially because we have the prototype that we put together for the actual implementation. That's one part. I think the other part was around the question mark on testing- the test infrastructure that we have, which is kind of broken in this context- I'm not sure exactly how much work that is going to be. I mean, to answer the last question: it's not the cheapest CAP we've done. I would probably put it as, you know, medium in terms of complexity, whatever that means. So if we were starting to work on this, let's say, in Q1 next year, it would probably be done in Q1.
+
+[20:00] I don't know the implications on the SDKs and so on, though, but it looks like there is not a whole lot going on there. It's very similar to the change we did when we changed the envelope before- actually, it's a little easier, because there's no muxed account, which was the big mess. It's still, yeah- all the SDKs have to upgrade, that situation.
Yeah, I can talk about the SDKs a little bit, because I prototyped making the change in Core, Horizon, and then the SDKs- and I obviously took shortcuts around testing- but the SDK part was very straightforward, actually. So I think there'll be very little: it'll be a lot easier than when we introduced the envelope change, a lot easier
+
+[21:00] Than when we introduced muxed accounts. And I think even the core changes were relatively straightforward when you just focus on the functionality, but then it definitely got a bit fuzzy when trying to figure out how to test this with the existing tests, and I'm not that familiar with the Stellar Core codebase, so it was definitely challenging. So I think that's probably right, too. And then Horizon has this transaction queue: you submit transactions, they go into the queue, they get ordered by sequence number, and then each gets submitted when its sequence number is ready to be processed. It allows people in the ecosystem who are using Horizon to just submit transactions in any order, and Horizon sort of smooths that experience over for them. And I think there is a little
+
+[22:00] Bit of work to do there to make that work with min sequence numbers, but other than that, the rest of the Horizon changes were pretty straightforward as well.
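+
+A hedged sketch of that queue adjustment: Horizon holds a transaction until its sequence number can apply, and min-sequence-number preconditions widen that readiness window. The names here are illustrative, not Horizon's implementation:
+
+```typescript
+interface QueuedTx {
+  seqNum: bigint;      // the transaction's own sequence number
+  minSeqNum?: bigint;  // CAP-21: relaxes the strict next-sequence rule
+}
+
+// Whether a queued transaction is ready to submit, given the account's
+// current sequence number on the ledger.
+function readyToSubmit(tx: QueuedTx, accountSeq: bigint): boolean {
+  if (tx.minSeqNum !== undefined) {
+    // Valid anywhere in [minSeqNum, seqNum), not only at seqNum - 1.
+    return accountSeq >= tx.minSeqNum && accountSeq < tx.seqNum;
+  }
+  return accountSeq + 1n === tx.seqNum;
+}
+```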
Something that I want to mention, too: Nico was appealing to the ecosystem- we want to find out what use cases people have for this. We've got Meridian coming up soon, and we are going to be talking about CAP 21, CAP 40, payment channels, and how it all actually works there, and we will talk about the performance- we should have the performance experiments done by then as well. So that's probably worth watching.
+
+[23:00] So it seems like where that leads us is: technically, this CAP feels sound. There are further experiments happening to test performance. There's a question of whether or not we should prioritize implementation of it, which we can try to figure out offline, and we would love to hear from people in the ecosystem about any use cases, because that would definitely help identify this as a priority- although it's not necessary that we hear that, we'd love to hear from anyone who might want to use this. And, finally, there will be more info on this at Meridian, which is a good time to learn more and also possibly come to us with some idea of how you might use this. Does that all sound correct? Yeah, but you know, this is also an insurance policy, right? You don't go around asking people what their use cases for insurance are, right?
+
+[24:00] Yeah, no, I understand. I don't think it's necessary, right- I don't think it has to be the sole criterion for deciding how to prioritize this. But if someone is like, "I know what I need this for," and they told us, that would be helpful, right? Yeah, but I don't think that's a good reason to delay accepting it today. Yeah, I mean, procedurally, you're always gonna get more information by delaying, until it's too late. I mean, I think procedurally- there's one person, Jed, who couldn't make it today, and so I think the actual acceptance would have to happen asynchronously, offline. And so, for me, I think the takeaway is that we should put it in front of the
+
+[25:00] CAP committee and see what you guys think about accepting it. That's kind of where I'm at. I don't hear any objections to that plan- and CAP 40 as well? Because, yeah, I guess the only objection to CAP 40 was that CAP 21 hadn't been accepted yet. That's correct. People should go on record now with any suggestions for CAP 40, or we should move forward with that one as well. No, it should move at the same time. Okay, cool. Well then, I think the follow-up action is to basically put both of those forward and say, hey, CAP committee, take a look and see if they get approved, which we can do asynchronously. Is there anything else? Are we done for the day?
+
+[26:00] I'll take that silence as: awesome. Well, once again, thanks, everybody, for joining, thanks so much to everyone watching, and again, please make sure that you prepare for the Protocol 18 upgrade. The vote is on Tuesday, November- or Wednesday, November 3rd- so install up-to-date software before then. And, yeah, if you have anything exciting to say about payment channels, make sure to join the discussion or check in with us, because we would love to know about use cases that this would enable, and we will see you next time and at Meridian. Thanks, everybody.
+
diff --git a/meetings/2021-11-12.mdx b/meetings/2021-11-12.mdx new file mode 100644 index 0000000000..b101667594 --- /dev/null +++ b/meetings/2021-11-12.mdx @@ -0,0 +1,155 @@ +--- +title: "Stellar Next-Gen – Blockchain Use Cases" +description: "A high-level walkthrough of Stellar’s real-world blockchain use cases, covering asset tokenization, anchors and stablecoins, wallets, on-chain trading, and how Stellar supports cross-border payments, remittances, and financial inclusion." +authors: + - ada-vaughan + - anke-liu +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This next-gen session introduced Stellar as a purpose-built blockchain for global financial infrastructure, focusing on how digital assets and open networks enable new forms of value creation, storage, and movement. The talk connected core blockchain concepts to practical examples, emphasizing accessibility, low fees, and interoperability. + +Speakers walked through how Stellar is used today: tokenizing assets, holding and diversifying value through wallets, and sending or exchanging assets across borders. Real-world applications—from NFTs and tokenized securities to stablecoins, remittances, and anchor-based on/off ramps—illustrated how the network supports both developers and end users, especially in emerging markets. + +### Key Topics + +- Tokenization on Stellar + - Issuing digital representations of fiat, securities, NFTs, and real-world assets + - Examples including NFTs, tokenized stocks, and fractional real estate access +- Stablecoins, CBDCs, and anchors + - Role of anchors as regulated on/off ramps for fiat-backed assets + - Stablecoins like USDC and how they move value across borders + - Overview of CBDCs and their fit on open, permissionless networks +- Wallets and value custody + - How Stellar wallets represent ownership on the ledger + - Examples ranging from general-purpose wallets to tools for refugees and the unbanked +- On-chain trading and liquidity + - Built-in orderbook and automated market makers + - Path payments enabling seamless currency conversion across assets +- Cross-border payments and remittances + - Replacing slow, expensive correspondent banking flows + - Use cases for B2B payments, remittances, and cash-based corridors + - Real-world integrations leveraging anchors and global partners +- Ecosystem and community + - Overview of developers, fintechs, and financial institutions building on Stellar + - Learning and funding programs supporting builders and experimentation + +### Resources + +- [Stellar Quest](https://quest.stellar.org) +- [Stellar Community Fund](https://communityfund.stellar.org) + +
+ Video Transcript
+
+[00:00] Well, first I want to thank you so much for inviting me to the speaker series. It's an honor to be here, and thank you to Anke for pulling this together. The coordination of things like this is no small feat, and we're very lucky to have her, as part of the Stellar Development Foundation, pulling these things together for the Next Gen initiative. Today I'm going to talk to you about open networks and digital assets. I know that Billy explained kind of the ins and outs of blockchain in general- if you haven't seen that first video, it is on the Stellar Next Gen Discord channel, so feel free to take a look if you missed it. Billy Grannis gave a great talk, and it's something to get you started. Today I'm going to talk to you about what we do at the Stellar Development Foundation, which we call SDF for short, and then we'll dive into the applications and use cases of blockchain and, specifically, Stellar. I'm going to do probably 10 or 15 slides- I'm not sure
+
+[01:00] Where the marker is exactly- and then pause for questions, and there'll be more extended time for Q&A at the end. And if there's any technical difficulty, like you can't see something, jump in anytime. But yeah, I'm excited to be here. So my name is Ada Vaughan, and I work in marketing and communications, but I've spent several years of my career in blockchain and crypto, both as a business developer and in the area of partnerships. So I've worked with a lot of different companies in the space, and I'm really excited to be here today to talk to you about Stellar from the SDF. So let's get started. The Stellar Development Foundation is a non-profit entity, and we are solely tasked with supporting and maintaining the Stellar network. So
+
+[02:00] The Stellar network and the Stellar Development Foundation exist to create equitable access to the global financial system. So how do we do that? We do it in three main ways: we make money more fluid, markets more open, and people more empowered. And the foundation helps maintain Stellar's code base by supporting the technical and business communities around Stellar, providing thought leadership to regulators and other stakeholders, and physically maintaining Stellar's code base. So everything SDF does is aimed at making the network as a whole stronger and making it a success in its mission. So- I'm clicking on two screens here- Stellar itself is a decentralized, open network that connects the world's financial infrastructure. Blockchains can be used for a lot of different things, but Stellar is purpose-built for connecting
+
+[03:00] Financial systems, and it does that with speed and at scale, so the Stellar blockchain can be used to transfer value. The exciting thing about blockchain as a technology in general- and Stellar is one of these chains- is that it makes it possible to move money in new ways. Blockchain makes it possible to create, send and trade digital representations, also called tokens- you'll hear me say tokens and tokenization to mean this digital representation- of all forms of money, whether it's a dollar, a peso, a bitcoin- it can be anything. So it's designed so that all the world's financial systems can work together and value can travel on a single network. So let's talk about who exactly uses Stellar.
There
+
+[04:00] Are a lot of stakeholders involved, and, because Stellar is an open and permissionless network, really anyone can build anything they can imagine on Stellar to create impact on the global financial system of today. Stellar is purpose-built for working with that system- we look to extend, and not replace, the world's existing financial infrastructure. So: businesses that want to pay for goods and services across borders; banks and fintechs that want to quickly process payments and move assets; exchanges that are making, for example, on-chain liquidity with blockchain more accessible; and, of course, the developers who are building and integrating digital infrastructure across these financial systems. Governments are interested in Stellar, and we have a lot of communications where we speak with regulators and try to inform policy makers' decisions about blockchain technology. And then, of course, consumers who want to send,
+
+[05:00] Receive and grow their hard-earned money- really everyone, since entities can offer financial products and services and take advantage of these features to build better solutions to real-world problems. Stellar is uniquely useful. So how do they do it? We'll talk about that. There are three main things that people do on Stellar where it adds utility to these processes. The first is that, in order to move an asset on Stellar, it has to be tokenized. So a tokenized asset is a digital representation- it could be what we call a fiat asset, like a regular currency: a dollar, a euro, a British pound, a peso. Then, once they have that value, they need to hold it, and they may want to diversify it. So they may start out with a US dollar tokenized on Stellar but want to move to a Mexican peso, for example, or they may want to send money to
+
+[06:00] Someone who, when they receive it, wants to change it to a different form of value. One of the things about blockchain, and the reason why it's so valuable for financial systems, is that it's immutable. With new technologies for moving money, there's a high motivation to tamper with those technologies, and because blockchain is so strong in terms of its immutability- its inability to be changed- and the ability to monitor it, it has particularly high value in this use case. The third thing that people do, once they have assets on Stellar, is they want to send and exchange them- as I was saying, going across borders, making a payment to a business in another country, or even sending peer-to-peer remittances: I can send you money if you're in a different country, and I can use Stellar to move that value. So
+
+[07:00] The first step: I'm going to go through these three main ways that Stellar is used, and we'll start out with tokenization. So you might wonder: how does a dollar get onto Stellar? And that is this process called tokenization. In order to have a universal payment network where value can move, everything has to be in this form of a token, and so tokens are a redeemable, tradable representation of any asset- and it only takes a few lines of code to do it on Stellar. Token issuance is really the backbone of the Stellar network, and it's how we connect all the world's currencies and systems- it's what Stellar was built for. Asset issuance is also flexible: you can issue a token that represents anything from an ounce of gold, to a piece of art, to an hour of your time as a consultant. It could be a real asset, could be a service- it's up to you.
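+
+To make those few lines of code concrete, here is a hedged sketch using the JavaScript stellar-sdk; the keys and the hard-coded sequence number are placeholders, and in practice the account would be loaded from Horizon and the transaction submitted to the network:
+
+```typescript
+import {
+  Account,
+  Asset,
+  BASE_FEE,
+  Keypair,
+  Networks,
+  Operation,
+  TransactionBuilder,
+} from "stellar-sdk";
+
+const issuer = Keypair.random();      // the account anchoring the asset
+const distributor = Keypair.random(); // the account that will hold it
+const gold = new Asset("GOLD", issuer.publicKey()); // e.g. an ounce of gold
+
+// Placeholder sequence number; normally fetched with server.loadAccount().
+const distributorAccount = new Account(distributor.publicKey(), "0");
+
+const tx = new TransactionBuilder(distributorAccount, {
+  fee: BASE_FEE,
+  networkPassphrase: Networks.TESTNET,
+})
+  // The holder opts in to the asset by establishing a trustline...
+  .addOperation(Operation.changeTrust({ asset: gold }))
+  // ...and a payment from the issuer is what actually issues the tokens.
+  .addOperation(
+    Operation.payment({
+      source: issuer.publicKey(),
+      destination: distributor.publicKey(),
+      asset: gold,
+      amount: "1000",
+    })
+  )
+  .setTimeout(300)
+  .build();
+
+tx.sign(distributor, issuer); // both accounts' operations need signatures
+```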
Today, I'm going to talk to you about two main types of assets, which are these
+
+[08:00] Non-currency assets, I'll call them- things that are not a dollar, not a peso; things like that hour of your time, or a piece of art- those are non-currency assets. And then we'll also talk about fiat-backed assets and the utility for those. I love this example. So much of the internet and so much of technology has grown around entertainment, right? This is an example of a game platform that started with a card game, and the cards were able to be traded inside the game and used as assets, so they had value only inside the game. Litemint is a tech company- they call themselves a tech company that creates innovative products to connect crypto enthusiasts and gamers to unique experiences. Litemint started off with a site called litemint.
+
+[09:00] Io, and this is a little screenshot of their game. Their cards were collectible on the Stellar blockchain, and then, when their community wanted a place to start to trade their cards and interact with one another with the cards' value, that's when the idea of an NFT marketplace came to life. So, do you all have the ability to raise your hand? Let me see- I should have asked at the beginning. So how many folks have heard of an NFT? Okay, so now, for all the people who have ever owned an NFT, keep your hand up. So game platforms and NFTs are used in Litemint, and
+
+[10:00] It allows creators- as you may have heard, NFT stands for non-fungible token: a unit of value that cannot be divided. So if you have an NFT that represents an avatar that you drew, or a service you're providing, whatever it is, you cannot split the NFT- it has a single representation- but it can be traded. So this is an example of a game platform that uses NFTs, and it's pretty neat, because the Stellar network is very cheap to operate, unlike some of the other major blockchains, and so creators can make an NFT that costs five or ten or twenty- or even a dollar- and because the cost of transactions on Stellar is so low, really anyone can do this, which is pretty neat. It's another layer of inclusion in Stellar that is pretty fun, and it allows for a very flexible and
+
+[11:00] Movable kind of NFT. So, on a more serious note: tokenization of securities. I'll do the same question: how many of you own a security, own a stock? Probably everyone, right- everyone in the room probably owns some sort of security. Go ahead, let's see. Is it every single person? That's got to be right- everyone's got something in their portfolio. Hopefully there are some people that raise their hands- can you see them, Ada? Yes, I can see- there's a sea of hands. I can see that nearly everyone, or maybe everyone, has owned a stock. You probably have not owned a tokenized stock, though, and the reason for that is that we live in a high-income economy where we have very easy access to high-quality securities. I can go on E*Trade- almost any of us can open an account
+
+[12:00] At a Fidelity or an E*Trade or a Charles Schwab. Any of these brokerage houses will welcome our business and allow us to buy securities and enter into other kinds of investment opportunities. However, in the rest of the world, especially in emerging markets, it is not that way at all.
We can buy a share of Tesla or Apple anytime we want, but someone who is living, let's say, in a small town in Latin America, or in sub-Saharan Africa, or other parts of the world- they simply don't have access to these kinds of assets. And so tokenization of securities is a really interesting use case for blockchain, and for Stellar specifically- I'm going to show you an example of it. Whether it's stock or real estate, these kinds of assets- again, they're not money, they're different from a currency asset- can be tokenized very similarly to securities: they can be tokenized into shares and tokens on Stellar.
+
+[13:00] So I'm going to show you an example of a company called DSTOQ- they have kind of a fun spelling of their name- to give you an idea of how that works. So they are a company that offers access to stocks like Apple and Tesla, like you can see here, in emerging markets. They also allow micro-investing, so you can buy a fraction of a share- for someone who maybe can only invest, you know, 10 euros or 20 euros- and this is a really unique way to expand access to high-quality assets that they otherwise would not be able to reach, and they can also securely trade them. This app is available in Europe, Vietnam, South Africa- there are a number of markets where people can then trade these fractional shares- and so it's kind of a marketplace in and of itself, and it's a pretty interesting
+
+[14:00] Use case for financial inclusion. Here's one more. This company is called RealtyBits, and, again, RealtyBits is a marketplace on Stellar, and they have built infrastructure where people can invest in opportunities like startups and real estate. RealtyBits' story is that they started out, as you might imagine from their name, mainly doing real estate, but then they had these verified sponsors and investors involved in their platform and realized that they could do a lot more, and so they expanded to other types of assets. And it allows for cap table management- it just really simplifies the whole process. Normally, to invest in a private asset, it's quite a clunky process: there's a private equity manager, and they've got to talk to this person, talk to that person. It's very slow, there's a lot of friction, and it's very expensive, and so this just promotes a lot of efficiency
+
+[15:00] In these kinds of investor-sponsor relationships and streamlines the whole process. So RealtyBits built this platform, and it offers users access to investments that they again wouldn't ordinarily see, or where investing in a small way would be too cumbersome, so it gives them a faster on-ramp. So, as you can see from these use cases, any company- it's open source- any company can issue any kind of asset they want on Stellar. So now we're going to talk about- well, my slides have gone crazy, sorry. Now we're going to talk about the use case for fiat currencies. Who here has heard of the term stablecoin? A couple of people. Okay, so keep your hands up. So now I'm gonna
+
+[16:00] Ask another one- you can lower your hand- and then: how about CBDC? I can say it so fast because I say it a lot: CBDC. Okay, a few people, not many. Stablecoin is a relatively new term. When you tokenize an asset on Stellar and it happens to be a fiat currency, we call it a stablecoin. And what that means is- well, blockchain assets grew up
in this world of coins, like Bitcoin- everything's a coin- so stablecoins are meant to mean that they are always tied in a one-to-one relationship with their underlying fiat currency. So one, let's say, USDC- which is a US dollar stablecoin we'll talk about in a minute- is always worth one U.S. dollar and redeemable for that dollar. So stablecoins can represent any of the world's currencies,
+
+[17:00] But they always remain stable in value: one coin is one dollar, or one coin is one British pound, just like that. In the case of CBDCs- this is a relatively new development in blockchain. So there are probably- I want to say 60 to 80 pilot projects going on around the world, and then there are a few that are starting to roll out and get beyond the pilot stage. But the idea of a CBDC, which stands for central bank digital currency, is beyond a stablecoin. These are a type of stablecoin where a central bank issues the asset, and what that would mean is that the central bank can retain control of their monetary policy, and they can control the economics and security and functions- and there's some programmability on Stellar, a lot of really neat features. But CBDCs are a form of digital currency that are
+
+[18:00] Meant to be controlled by a central bank, just like ordinary fiat money- so it keeps that central control over the monetary policy. Oh, my hand is raised- I can't see who raised their hand. Are your hands still up from the CBDC question? If anyone has a question, go ahead and jump right in and speak, since I can't see all of you. Yeah, my bad, I think I just left it up. Oh, no problem. All right, great. So we are going to talk about USDC- oh, and I should probably mention anchors before I get any further. So CBDCs and stablecoins, as I said, are representations of regular money. So when you have a regular dollar, you give it to an asset issuer and they give you a token. And those issuers, in the Stellar network, we call them anchors, and that is because they anchor the fiat value to the token, and so
+
+[19:00] They're a very important part of the system of onboarding value to and off-boarding value from the Stellar network. And you'll hear more about anchors in just a minute. One really important anchor, for USDC, is Circle. I'm sorry my slides are so jumpy- I need a better mouse. This graph is actually a little bit out of date: I believe we have surpassed 40 billion USDC in circulation today. USDC stands for USD Coin- as I said, many things are a coin in our world. USDC is issued by regulated financial institutions, it's backed one-to-one by U.S. dollars, and it's governed by a consortium called Centre. I think someone went off mute for a second. And so there are these kinds of technical and policy standards,
+
+[20:00] Financial standards, for stablecoins. USDC is always a dollar, and it's available on multiple networks too, which is one of the reasons, I think, for its fast growth. It has transferred over a trillion dollars on chain in a year, basically, and so there's a lot of movement of USDC. It's super useful as a way of moving value without losing value, because it's always stable- it's a stablecoin- and the USDC in circulation has been growing all the time; as I said, I think it's over 40 billion right now.
So these anchors, as I was saying, are both the on- and off-ramps of value, and you can kind of see an example of some of the Stellar anchors around the world in this slide. So, in addition to USDC, there are other US dollar stablecoins that are issued from the U.S., as well as Argentine pesos, euros, Nigerian naira, Philippine pesos- there are
+
+[21:00] Lots more. And so, especially when you have this transition between currencies, and when one of those currencies is, I'll say, difficult or less liquid- if you've ever traveled someplace in the world where it was a currency you don't use as much, those kinds of currencies have a lot of friction in transition, and people tend to pay very high fees. So connecting those financial systems with Stellar has a huge benefit for the citizens there, and those anchors in the local area serve as an on- and off-ramp for value to the network. So they accept the local currency, and then they can onboard it and keep it in that currency, or it can change on its way. Oh- do you have a question? Let's see, another raised hand. Yeah, go ahead. Generally- like with CBDCs- yeah, so I was
+
+[22:00] Wondering: a lot of the hype around crypto is about how it's decentralized, but CBDCs are regulated and issued by central banks. How do they fit into the picture of cryptocurrency in general? Yeah, that's a good question. So assets like stablecoins are typically issued by a single source. So even though, you know, Centre is a consortium of companies, Circle is the issuer of USDC, so all the value is sitting in a centralized place. I think the difference, and what we are trying to push for also with CBDCs, is to have them have the ability to move on an open and permissionless network. So we care a lot about the network versus the asset issuance. I mean, it's great- it's easy and secure, and there are a lot of great features for issuing assets on Stellar; we absolutely want people to do that. But in terms of our global financial inclusion goals,
+
+[23:00] In the space of CBDCs, we really want these assets to be changeable and to be able to travel- and by changeable, I mean interoperable, so they can travel between geographies- and to be able to be used by anyone. And I think actually central banks want the same thing: they want access for everyone- that's a large concern- they want security, they want access. We're actually getting ready to publish a study, a kind of deep research paper, on CBDCs, probably before the end of the year, fingers crossed, that Anke can definitely hook you up with if you're interested in learning more about the details of policy frameworks for CBDCs. It's pretty intricate stuff. Any other questions? Go ahead, jump in. Hi, hello- thank you, Ada. I was just having a question about how the anchor system
+
+[24:00] Works. So, the way I understand it, you go to the anchor with a hundred bucks, you give it to them, and they give you back a hundred tokens. Would these anchors be holding those hundred bucks in their reserve, or would they be holding one portion of it and using the rest for investments, like a bank does? And is that how they make their fees? It's an insightful question. Yes, so most anchors do charge a fee to convert from the fiat- whatever fiat you're giving them- to the tokenized asset, and anchors are regulated financial institutions.
So, you know, across the world - for example, there's a famous and much-criticized stablecoin called Tether (USDT) - people are very critical of their policies around the reserve, and they ask for audits and things like that. I think, in general, blockchain is fairly transparent, and companies - + +[25:00] and Stellar is this way too - are very open and transparent about policies, about how they manage funds, and about exactly what their reserves are and where their reserves are. We insist that companies that become anchors on Stellar identify themselves. They're, you know, kind of a trusted financial institution in their own market, and they all have to comply with their own local regulations, but our expectation is that the assets are fully reserved. We are an open network, so anyone can use it, and it's definitely up to the user and the consumer to make sure that they're making smart decisions, but we really look to hold our anchors to a high bar. Well then, if one of the anchors goes down, would that mean other anchors have to help save them? Or is this a connected network, or are they all by themselves? Because if they're not, they're just like individual + +[26:00] banks, small banks that just exchange between fiat money and tokens. They are MSBs, so they are money service businesses, or fintechs, so they're like a bank in their local area, for sure - they actually act like a local bank. So they're prone to the same risks of a bank going bankrupt because of their investments, potentially. Although I would say that anchors are the on and off ramp, and I would use the word ramp, because you don't usually hang out on a ramp, right? When you go on the highway, you get on and you go where you're going, and when you get off, you don't park on the ramp and stay there. So I think most of the time value is moving through the anchor and it's on its way somewhere else. I think still the majority of the use cases that we're looking at for these kinds of cross-border payments all start in some kind of fiat and end in some kind of fiat. And this + +[27:00] can happen in, you know, minutes. When it arrives at the destination, generally speaking, people are going to land the value in their bank account, or in cash sometimes, and so that means the transaction happened and it's done: you have your value. Thank you so much. Sure. No, I think I left my hand up. Okay, hi, nice to see you again. So, yes, just to sum up on that question: all the anchors are maintaining these fiat reserves, once they onboard value to the network, equal to the tokens that they've issued on Stellar, and then any time people redeem or pass value to another anchor, that's how the value will move and eventually land and be out again. Our vision for Stellar + +[28:00] is this: better connections, better interoperability for a world where we see globalization happening, but where access to financial services is still fairly unequal. You know, when USDC moves through the economy, the underlying dollars are still sitting in the bank, but we're enabling all of this commerce. There's economic activity. People can loan tokens to one another. There are no intermediaries or banks taking a piece of each transaction along the way.
So there's a lot less friction, and we're really envisioning blockchain as enabling this more connected, more interoperable world. The next thing: so that was all about asset issuance, getting value onto the Stellar network. I'm going to pause here for a second if there are any other questions before we talk about holding and + +[29:00] diversifying the value on Stellar. Any other questions? All right: holding and diversifying value on Stellar. Anyone who has value on the network needs a place to keep track of it. When you move money, you're used to this, right, when using a bank: any time you move money, you move it into the bank, then you take it out and use it or spend it. But accounts on Stellar work differently. Even though we call them accounts, they really are just a marker in the ledger that says what belongs to you. So this is - and I'm sure Billy talked about this too - distributed ledger technology. On the ledger it says you own this five dollars. Let's say this five USDC belongs to Jim: it's in his wallet, and everyone knows it's in his wallet because it's on the shared ledger, and then, when he wants to spend it, there's a transaction that sends it from his wallet to someone else's wallet. And so, as you + +[30:00] might imagine, these digital wallets are different than a cash wallet that you use: unlike a physical wallet, they don't contain the money. They really are just showing you a view of the ledger, of where your money is noted in the ledger, and so they don't really store assets, but they give you access and they give you the ability to control them. Wallets also connect to anchors. So when you do walk in with your cash, or send a bank transfer to an anchor to get those tokens, you need to have a wallet to land that value coming out of the anchor in tokens. There are a lot of different wallets, and they have to be compatible with the blockchains that those assets move on. So Lobstr is an example of a Stellar-compatible wallet, and it gives users access to any asset + +[31:00] on Stellar. So whether you've tokenized, you know, a card or a bar of gold or 100 USDC on Stellar, all of those are available to be seen with a Lobstr wallet, and it has a mobile and a web version. It has a very clean design - we really like and recommend Lobstr - and it's also very open-ended, so it can be used for all these different kinds of assets.
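To make that "a wallet is just a view of the ledger" idea concrete, here is a minimal sketch using the JavaScript stellar-sdk (v8-era API; newer releases expose the same call as `Horizon.Server`). The account ID is a placeholder; showing a balance is nothing more than one Horizon lookup:

```js
const StellarSdk = require("stellar-sdk");

// Horizon is the API in front of the ledger; testnet here
const server = new StellarSdk.Server("https://horizon-testnet.stellar.org");

async function showBalances(accountId) {
  // Load the account entry straight from the ledger
  const account = await server.loadAccount(accountId);
  for (const b of account.balances) {
    // "native" means lumens (XLM); other entries are issued assets like USDC
    const code = b.asset_type === "native" ? "XLM" : b.asset_code;
    console.log(`${code}: ${b.balance}`);
  }
}

// Placeholder: replace with any funded account's public key ("G...")
showBalances("G...YOUR_ACCOUNT...").catch(console.error);
```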
As we look at these fiat currencies and the things that you can do with them in diversifying your value, I really love this example of another wallet that is purpose-built for a very specific use case, and this wallet is called Leaf Global. Leaf can actually run on a low-feature phone as well. It's been deployed in Kenya, Rwanda and Uganda for about a year, since September 2020, and it enables people + +[32:00] who don't have a bank account, don't have an address, and who are completely disconnected from the traditional financial system to hold, spend and send value. It is really designed for refugees, cross-border traders and financially marginalized people who, for whatever reason, cannot get a regular bank account, and so it's really accessible: everything from the different languages that it supports to the ways that it identifies people. You would be surprised how difficult it is to pass these traditional - what we call KYC, know your customer - checks in ordinary financial services, and so it is a terrific help for refugees, because they also are very vulnerable to robbery. When they have to carry cash, it can be dangerous, especially when crossing borders, so they really have a safety issue that is solved by the + +[33:00] Leaf wallet, and it just empowers them to hold and keep value when they need it most. So once someone has a place to hold value in their wallet, then we can think about ways to diversify it. There's DSTOQ - we talked about equitable access to various kinds of investments. Earning yield is something that has come to the forefront with all different kinds of crypto, but, in particular, you can earn yield on stablecoins. And so let's say, for example, I live in Argentina or Venezuela or Zimbabwe or other places that have had severe problems with their currencies. With crypto, like with a stablecoin, I can save in U.S. dollars, and I can also deposit that + +[34:00] in different kinds of wallets that earn yield. So it is such a wonderful hedge against inflation. Especially - I mean, we see inflation here. I think the numbers that came out for October were over six percent; I think we have right now in the U.S. 6.2 percent annualized inflation. It's sky high. You know, a gallon of milk cost about a dollar in 1968, and today it's almost four dollars. So you can see the effects of inflation are dramatic if you don't have your money invested, and if you're in those marginalized communities where you're not able to invest, then you are very vulnerable to inflation. And so Vibrant is a great example, in Argentina and across Latin America. Maybe the very wealthy can save in U.S. dollars at the banks or keep value in U.S. dollars, but it's not available + +[35:00] to the average person, and so Vibrant is a super useful app for people to keep value in U.S. dollars and even earn on their balances. It's a super easy platform to use, and it's built on Stellar. So finally, we're going to talk about sending and exchanging assets. I'm really sorry for these jumpy slides. Sending and exchanging assets is one of the things that Stellar is uniquely suited to do because of the way it's structured and because it's an open and permissionless network. There are lots of pathways along which value can move. One of the most useful kinds of applications that we see is cross-border payments and remittances, because you are able to send fiat value, once it's tokenized on Stellar, and have it change along the way into another store of value. So these cross-currency transactions are really very efficient on Stellar, and part of + +[36:00] that is due to what's called a built-in DEX. A DEX is a decentralized exchange, and what that means is that there's an orderbook, similar to a stock exchange or other kinds of exchanges, where assets can be traded for one another based on a bid and an ask price. So this is just a representation of a typical orderbook on the DEX. We recently released another piece of technology called automated market makers, which also allows assets to be exchanged on the fly and improves the efficiency of Stellar. When I said you can send from one currency to another: that is kind of a unique thing about Stellar, and we do it with this thing called a path payment, which can go through the DEX or it can go through + +[37:00] the AMMs, the automated market makers. And if you look at this map, these path payments can start out in Philippine pesos, change to U.S. dollars, to lumens, or to Nigerian naira, and along the way they can make many hops into different currencies as needed and land in the value that the receiver wants. Both sides only get the currency that they want to deal with. Sorry - I think somebody went off mute - but this allows for a lot of flexibility and different use cases on Stellar.
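For the technically curious, here is a hedged sketch of what a path payment looks like with the JavaScript stellar-sdk. The asset issuers, amounts and the secret key are placeholders, and a real integration would typically first ask Horizon's `/paths` endpoint for a viable route; the point is just that a single operation sends one asset and delivers another:

```js
const StellarSdk = require("stellar-sdk");

const server = new StellarSdk.Server("https://horizon-testnet.stellar.org");
const senderKeys = StellarSdk.Keypair.fromSecret("S...SENDER_SECRET..."); // placeholder

async function payAcrossCurrencies(destination, phpIssuer, ngnIssuer) {
  const sender = await server.loadAccount(senderKeys.publicKey());
  const tx = new StellarSdk.TransactionBuilder(sender, {
    fee: StellarSdk.BASE_FEE,
    networkPassphrase: StellarSdk.Networks.TESTNET,
  })
    .addOperation(
      StellarSdk.Operation.pathPaymentStrictSend({
        sendAsset: new StellarSdk.Asset("PHP", phpIssuer), // what the sender pays
        sendAmount: "1000",
        destination,
        destAsset: new StellarSdk.Asset("NGN", ngnIssuer), // what the receiver gets
        destMin: "7000", // protects the receiver from a bad conversion rate
        path: [], // intermediate hops (e.g. XLM), discoverable via Horizon's /paths
      }),
    )
    .setTimeout(30)
    .build();

  tx.sign(senderKeys); // the network routes through the DEX/AMMs atomically
  return server.submitTransaction(tx);
}
```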
So let's look at cross-border payments and remittances. This is an enormous market: 37 trillion in 2020, and 39 trillion by 2026 is predicted. One of the real areas of friction is, as + +[38:00] I was saying, in emerging markets, where cross-border payments and remittances can cost 10 percent. So the people who need the money the most are actually paying the highest fees for these transactions. And whether they are migrant workers sending money home, businesses paying remote staff, or suppliers getting paid by customers, there's all this friction when transacting between different currencies. Intermediaries all take a piece, and starting with the payer's bank and ending in the account of the recipient, there are usually many steps. Each costs some money and time, so it tends to be very slow. Because of globalization and our increasing tendency to do business across borders, a lot of fintechs and large multinational companies are looking for new ways to do cross-border payments. They're really pushing in this regard, and B2B payments are a strong use case on Stellar; there's a lot of this type of transaction on + +[39:00] the Stellar network. Here's a look at the existing landscape for payments. If you're in Europe, you might use SEPA inside of Europe. SEPA payments are really quick and efficient and cheap, but once you want to try to send money overseas, then you're riding on SWIFT or some other rail. If you're inside the U.S., you might use ACH, which takes a few days but is pretty dependable and pretty cheap. The problem is that none of these payment systems are interoperable, so I can't send a SEPA payment to an ACH recipient in another country. And so in the 1970s they stood up this system called SWIFT, which stands for the Society for Worldwide Interbank Financial Telecommunication, so it's kind of a mouthful. SWIFT payments were established out of Brussels with certain standards, where + +[40:00] all these member banks could send money to each other. But SWIFT was never really built, frankly, for individuals; it never served peer-to-peer very well. It's pretty slow, and it's a closed system, so only banks can participate. You can't have a fintech just jump in and initiate a SWIFT payment; they have to use a bank to do it. So sending funds cross-border is usually super slow and incurs a lot of fees. One way that companies deal with this is they just open bank accounts in all the geographies where they do business, which can work, especially if it's just one or two geographies and it's a very large company: you can establish operations and get a bank account wherever it is that you want to land value, and then pre-fund your transactions in that geography. But now you're holding capital in all these different markets, and you're also exposing yourself to forex - to currency risk between the currencies that you're holding.
So traditional cross- + +[41:00] border payments for businesses can be pretty much a pain, I will say. They start with this: let's say a service provider issues an invoice, and then the customer goes to pay them, so they wire their money to a correspondent bank. The correspondent bank receives the money and sends it to the receiving bank. So there are a lot of different steps, and some of them are even manual, so you have actual paperwork and things that seem archaic to us now. The typical time to settle a transaction like this is two to five business days, depending on the geography, and this is counting on having pretty much developed countries on both ends. Fees can run anywhere up to five or six percent. The way that Stellar manages cross-border payments is a little different, as you might imagine. The service provider still issues that invoice, but instead + +[42:00] the customer pays an anchor in their home currency. The anchor is responsible for onboarding that value and sending it to an anchor in the destination country, and then that anchor in the destination country pays out the fiat value in the local currency to the receiving bank. So here you have a frictionless, blockchain-based pathway for payments, with local partners to provide the on and off ramps for fiat, and the effect is the same: it's just a lot cheaper and a lot faster. The service provider is still being paid and the receiving bank is still receiving for the supplier, but there are no intermediary bank fees in all of these steps, and they're all dealing in their local currencies as well. So this kind of replaces the process of the international wire and the currency conversion with a single pathway. DTransfer is an example of an app + +[43:00] that allows companies to do this, and it kind of shows the pathway along the way for B2B payments between currencies. Their fees are something like one tenth of a traditional bank's, so it's of pretty high utility. It's connected to banking networks in Europe, the U.S., Mexico, Brazil and Tanzania, and it's expanding through Europe, Asia, Africa and the Americas. Now, ClickPesa is another example, one that deals with the high fees inside Africa. I think I mentioned a fee of 10 percent in Africa; paying between countries in Africa is definitely in the double digits - it can be 15 percent, 18 percent fees between African countries. And so ClickPesa gives companies that operate in Africa + +[44:00] the ability to accept payments online in a B2B payment platform, and it provides the on and off ramps for Tanzanian and Kenyan banks. Finally, remittances. We're going to talk about the remittance market, which is a very high-impact area for financial inclusion, because many countries in the world have a large percentage of their GDP coming in via remittances. There are families all over the globe that depend on remittances to survive, to live, and each year about half a trillion dollars of value is transferred cross-border through personal remittances. In most cases they're small - you know, 200 to 500 dollars - and the average cost is somewhere around seven percent. Between developing economies, like I was mentioning in Africa, it'll typically be over 10 percent.
So you have traditional money transfer organizations, but they're not + +[45:00] as efficient as they could be, they're not as fast as they could be, and with the insertion of correspondent banks and the older payment rails, there are definitely a lot of opportunities for improvement. So we really see Stellar-powered remittances as a huge advantage. These are just a couple - Tempo and Arf - that enable remittances. Tempo is located in Paris and Arf is in Istanbul, and they enable remittances to happen in a much more seamless way across Europe, and Tempo has other pathways to Africa and additional locations. So it's really pretty exciting to see these remittance use cases come to life. Finally, I'm going to show you one of our most exciting developments at Stellar: the work with MoneyGram. I'm curious, who's heard of MoneyGram? It's a pretty large business. So here are a couple of + +[46:00] people who have heard of the company MoneyGram. They are a money services business that allows people to send money around the globe, typically person to person. What we've done with MoneyGram in this recent partnership is connect digital wallets to MoneyGram, so that people have a pathway for cash in and cash out using the MoneyGram physical locations, and the value moves on Stellar. What's really cool about this is you have people who need to receive money in cash, for example in Latin America, because that's how they primarily live and transact, but they may not have a bank account, so they might not have a way to get the cash out. And so, using these money transfer services, we're able to leverage MoneyGram's retail locations, along with the efficiency and speed of the Stellar network, to get remittances - to get cash - into the hands of + +[47:00] people much faster. The partnership just started, so there's going to be a pilot, and I'll mention, in case Anke hasn't already, that we're going to be giving a demo and a lot more information about this partnership at our conference called Meridian, which is completely free. It's November 17th and 18th, next week. So if you're interested in Stellar technology - and, you know, we have debates and there's a lot of interesting stuff - feel free to register. You're all invited to attend and, of course, stay in touch. And depending on your interests, too, whether you have more technical interests or more business-oriented interests, there are different tracks. So there are two days of presentations and opportunities to interact with SDF. At SDF, we like to think of our + +[48:00] ecosystem. We call it an ecosystem, right, because SDF is really just here to support and foster the Stellar network, but now there are all these companies all over the world that are plugged in, and we see this as an approach to driving global financial inclusion rooted in global collaboration. And so all of these companies - whether they are wallets, exchanges, money transfer businesses, or payments businesses - are all participating in this ecosystem. And whether businesses focus on one use case or combine different use cases together to meet the needs of their target customers, there's just a lot of innovation going on. Stellar is always getting new features, and that's why I encourage you to attend Meridian and find out more about it. So these are just an example of some of the businesses that are part of the Stellar ecosystem today.
It's like watching a show - there + +[49:00] we go. These companies are located all over the world, and so this global ecosystem has evolved to serve a lot of different needs. Anke, do you want me to talk briefly about Stellar Quest and the community fund, or do you want to talk about that, since it is your area of expertise? Sure, yeah, let me - do you want to put up the slide? Sure, glad to. So this is totally cool, go ahead. Yeah, sure thing. So Stellar Quest is a fun, gamified way to learn the tech of + +[50:00] Stellar. Currently, if you sign up, you can go through practice challenges that make it easy to learn the tech of Stellar, and you don't need to know how to code. While completing these fun quests, you can collect rad NFTs, and during live series you can also earn XLM, which is the native currency of Stellar, so it's actually very fun. I definitely recommend anyone who is watching this to go to quest.stellar.org and at least go through the first series, see how you feel, because then you really get it - like, oh, this is how the ledger works - and you'll actually work with tools like what we call the Laboratory, where you can submit transactions directly to the blockchain. So, yeah, very exciting, and you get a good feel of how things work. Yeah, so I guess we can + +[51:00] go to the Stellar Community Fund as well. Yes, I will eventually turn this slide, sorry about that. Yeah, so if you do enjoy building on Stellar - and I think Jim also mentioned having some kind of hackathon at some point - I think you should definitely check out communityfund.stellar.org. It's an open-application grant program for Stellar-based projects: you can build and grow your project and then submit it to the SCF. And we also have a really vibrant Discord community where you can go in and check out all the other projects. Currently, our submission deadline is November 21st, which is obviously coming up, but definitely after - I think, like, November 24th - you can check out this round's selections and, yeah, see what people are all building. So the Community Fund is really cool, because the community itself votes on which projects they like best, and so, while SDF provides the funds, the community + +[52:00] allocates them. So it's really investing in the community's vision. That was it for me, thanks so much. Thank you so much, Anke. So `Stellar.org` is really the place to learn all about everything that's going on at Stellar, from news to developer relations. There's a pretty large knowledge base there that talks about all these different use cases, there are cool videos, all kinds of things. So, yeah, I invite you to visit `Stellar.org` anytime and learn more about Stellar's open network for storing and moving money. + +
diff --git a/meetings/2021-11-19.mdx b/meetings/2021-11-19.mdx new file mode 100644 index 0000000000..8fcef3e673 --- /dev/null +++ b/meetings/2021-11-19.mdx @@ -0,0 +1,144 @@ +--- +title: "Stellar Next-Gen – Technical Deep Dive into Stellar" +description: "A technical walkthrough of Stellar’s architecture, covering how the network works end to end—from nodes, ledgers, and transactions to consensus, governance via CAPs and SEPs, and hands-on development using Horizon, SDKs, and Stellar Laboratory." +authors: [justin-rice, marta-lokhova] +tags: [community, CAP-38, SEP-31] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session delivered a deep, system-level explanation of how Stellar operates under the hood. The discussion started with the fundamentals—nodes, accounts, ledgers, and transactions—and gradually built up to how those pieces fit together to enable fast, low-cost, and final payments on a global network. + +From there, the speakers explored Stellar’s unique approach to consensus and governance, comparing it to proof-of-work blockchains and explaining why safety, determinism, and low latency are core design goals. The session concluded with a practical developer walkthrough, showing how builders interact with Stellar using Horizon, SDKs, and Stellar Laboratory to issue assets and submit transactions. + +### Key Topics + +- Stellar network fundamentals + - Nodes running Stellar Core maintain a shared, immutable ledger + - Accounts, balances, and operations as the core ledger primitives + - Transactions composed of up to 100 ordered operations +- Ledger updates and transaction flow + - Transactions submitted by users and applications worldwide + - Validators agree on transaction sets every few seconds + - Once applied, ledger changes are final and transparent +- Consensus and security model + - Stellar Consensus Protocol (federated Byzantine agreement) + - Quorum slices, quorums, and open participation + - Preference for safety over liveness to avoid forks and reversals + - Comparison with Bitcoin and Ethereum consensus approaches +- Performance and sustainability + - Fast finality with single-confirmation settlement + - Very low transaction fees compared to proof-of-work systems + - Energy-efficient consensus without mining or heavy computation +- Open-source governance + - CAPs define protocol-level changes to Stellar Core + - SEPs define standards for building interoperable applications + - Network validators vote on protocol upgrades +- Developer stack and tooling + - Stellar Core, Horizon API, and language SDKs + - Using Stellar Laboratory to build, sign, and submit transactions + - Live demo of issuing assets and querying ledger state +- Getting started as a builder + - Testnet usage for safe experimentation + - Learning paths via documentation and Stellar Quest + - Community-driven development and contribution + +### Resources + +- [CAP-0038: Automated Market Makers](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md) +- [SEP-31: Cross-Border Payments API](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0031.md) +- [Stellar Laboratory](https://laboratory.stellar.org) +- [Stellar Quest](https://quest.stellar.org) + +
+ Video Transcript + +[00:00] Justin Rice: I'm the VP of Ecosystem at the Stellar Development Foundation, where I work with people - developers and enterprises - to understand how to use the network. At SDF, our goal is to support the open-source, open-participation Stellar network, which you will hear a lot about in a minute, in order to increase equitable access to the world's financial infrastructure. Marta? Hello - yeah, I'm Marta Lokhova. I'm a software engineer on the Stellar Core team at the Stellar Development Foundation, and so I work on the actual implementation of the distributed ledger, which is what powers the Stellar network. Really excited to be here with you. Are we ready? All right, we are going to dive in. This is going to be a fairly in-depth explanation of Stellar and of the blockchain technology that powers it. We're going to talk for a while, I'll hand it to Marta, then we'll probably pause about halfway through for questions, and then we'll roll up our sleeves and do a little bit of + +[01:00] playing around with the actual network. So: Technical Deep Dive into Stellar. Quick agenda: I'll explain how the Stellar network works - I guess I just sort of went through this - then give it to Marta to talk about how it compares to other blockchains, then we will talk about Stellar as an open-source project, and then we'll use the Stellar Laboratory and leave some time for Q&A. Okay. The Stellar network is made up of nodes. What are nodes? Nodes are computers that connect and communicate with one another to keep a common accounting ledger and to approve and ratify changes to it. In the image on the right, all those dots are nodes. Stellar is open participation, so anyone can spin up one of these nodes, and they run this thing called Stellar Core that Marta helps build. It's decentralized, because these nodes are run by independent individuals and organizations all over the globe. So each of those dots is a computer running Stellar Core and contributing to + +[02:00] the common ledger that nodes work together to keep. A physics major in college - nice. And you guys are all students - can you please mute your microphones? Thank you. I thought that was, like, coming in hot with a question at the top, and I approve of that level of verve and engagement. Okay: the ledger, understanding the ledger. I said that nodes work to keep a common ledger. It's stable, it's secure, it's transparent. Stable, because the network is spread over a bunch of servers and computers all over the world, so you can't turn it off; there's not a central server. It's secure, because the ledger entries are immutable: once a transaction has been processed, no one can change the data or manipulate the numbers to their liking. And it's transparent, because, like all public blockchains, everyone can see the ledger and trust that the information is correct. That's the ledger. So nodes keep the ledger - and what's on the ledger? Well, + +[03:00] accounts are the essential data structure in Stellar. They are saved on this global ledger. They hold balances, they sign transactions, they also issue assets, and account access is controlled by public/private key cryptography. Every Stellar account has a public key - they start with a G - and every account also has a secret key, which always starts with an S. The public key is safe to share, right? That's how people identify your account, so they can verify you and authorize transactions. It's kind of like your email address: people need to know it to send you an email. (That reference probably feels dated to many of you.) Your secret key, however, is private information that proves that you own your account. It's like your password: you can't share it with anybody. You use your secret key to sign transactions, which essentially call functions that change the state of the ledger.
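As a quick aside for the builders in the audience, generating that G/S key pair is a one-liner with the JavaScript stellar-sdk - a minimal sketch, not part of the talk itself:

```js
const StellarSdk = require("stellar-sdk");

const pair = StellarSdk.Keypair.random();

console.log(pair.publicKey()); // "G..." - safe to share; identifies the account
console.log(pair.secret());    // "S..." - private; it signs transactions, so never share it
```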
So, transactions - that's the next word. Transactions on Stellar are made up of operations, which are the actual + +[04:00] functions that change the ledger. A payment: that's an operation. An offer to buy an asset: that is an operation. Now, the Stellar vernacular here is a little confusing, because operations, honestly, are what most people think of when they think of transactions. But on Stellar you can actually bundle up to 100 operations into a single transaction. You can put them in a specific order, and they execute in that order; you can use that to do some interesting things. We won't get into it right now, but when I say operations, you can kind of think transactions; most transactions in fact consist of a single operation. Each of the entries that you see in this example ledger is a single-operation transaction. So, Lisa sends me fifty dollars - thank you, Lisa. Transactions are timestamped and contain unique identifying information, and every three to five seconds, those nodes that we talked about earlier bundle all the transactions together into a set, and they apply that set to change the state of the ledger. Let us + +[05:00] walk through that process. It all starts with a ledger, which, again, is essentially a list of accounts and balances, like a giant spreadsheet or a SQL database. Users, wherever they are in the world, submit transactions to send and exchange assets on the network. Some of these are individual users with Stellar wallets; some of these are businesses making B2B payments, and using Stellar, they can move funds anywhere in the world. They can convert currency along the way thanks to a built-in system of exchange that includes orderbooks and automated market makers, which we will talk about later. These transactions are bundled into sets, they're confirmed by validating nodes, they are applied to the ledger - at which point they're final and create an immutable record - and then the whole process starts over. So why is a blockchain ledger immutable? Why can't someone change the previous entries on the ledger? How do these transaction sets come together and get finalized anyway? To answer those questions, I'm going to pass the mic to Marta, who's going to dig in and explain how blockchain protocols work generally + +[06:00] and also how the Stellar consensus protocol works specifically. Marta? Hello - Justin, thank you; always great to hear you talk about these things. So let's dive a bit deeper into the technical aspects of the Stellar network, and the Stellar consensus protocol in particular. To do this, we're going to put things into perspective: we're going to compare Stellar to the two biggest blockchains out there, Bitcoin and Ethereum. Let's start with the intended function of these three blockchains. Bitcoin was the first blockchain to show a decentralized, completely internet-native store of value - excuse me - so we think of it as the first internet-native money.
Recently, more and more people have been referring to Bitcoin as digital gold, due to its scarcity and its potential as a way to beat inflation. But this talk is going to focus on the initial goal of Bitcoin as outlined in the Bitcoin paper, which is digital money. + +[07:00] On the other hand, Ethereum acts as a general-purpose decentralized computer. What I mean here is that it provides infrastructure for developers to write programs, or smart contracts, that can really execute anything. This allows a variety of different applications on the Ethereum network: things like NFTs, borrowing and lending applications, asset issuance and so forth. And finally, Stellar focuses on payments. Stellar is really good for fast and cheap payments, as well as issuing digital assets on the network; for example, on Stellar you can issue stablecoins such as USDC. So next, let's compare consensus mechanisms for each of these blockchains. Bitcoin and Ethereum both use proof of work, which we're going to discuss in a lot more detail in the next slide, but for now I also wanted to note that Ethereum is actually in the process of a major upgrade, as they're moving away from proof of work towards proof of stake. While the intended + +[08:00] proof-of-stake model actually has some better properties when it comes to security and scalability, in this talk we'll mostly focus on the current state of Ethereum. As for Stellar, it actually uses a completely different approach to consensus: specifically, it uses federated Byzantine agreement, which is implemented via the Stellar Consensus Protocol, to achieve consensus. So let's dive a little deeper and try to understand what differentiates all these consensus mechanisms. First, I just want to step back and emphasize that a consensus mechanism is vital for the security of a blockchain. It is essentially what allows all the nodes on the blockchain to agree on something in a safe way. If nodes don't agree on transactions in a safe way, it opens up the possibility of a double-spend attack. A double-spend attack is essentially spending the same money twice. A quick example here: imagine paying five dollars for parking but then spending the same + +[09:00] five dollars to buy an ice cream. You received goods for a total value of ten dollars, meaning that you got the parking space time and you got an ice cream, yet you only paid five dollars for everything. So consensus mechanisms want to ensure that such a scenario can't happen. Now let's dive a little deeper into what a proof-of-work system is. To reach consensus, nodes need to agree on the transactions to confirm in the next block. In proof-of-work systems, each validator solves a very difficult math problem that requires a lot of computational resources. It is also proven that there are no shortcuts to the solution, so the only way to get to the right answer is to actually perform the computation - i.e., show the proof of work. The first validator that solves that problem essentially gets to create the next block, and the validator that creates that block broadcasts it to everyone else on the network.
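To make that concrete, here is a toy proof-of-work loop in JavaScript, using Node's built-in crypto module. It is purely illustrative, at a trivially low difficulty compared to real mining, which hashes block headers at vastly higher difficulty:

```js
const { createHash } = require("crypto");

function mine(blockData, difficulty) {
  let nonce = 0;
  for (;;) {
    const hash = createHash("sha256").update(blockData + nonce).digest("hex");
    // "Winning" means finding a hash with enough leading zeros;
    // the only way to get there is brute-force computation
    if (hash.startsWith("0".repeat(difficulty))) {
      return { nonce, hash };
    }
    nonce++;
  }
}

console.log(mine("block #1: Lisa pays Justin $50", 4));
```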
So here again I want to kind of emphasize that validators on the network accept the longest chain. So if the validator is first to mine the block, it basically creates that longer chain that is later discovered and accepted by others. So another thing about proof of work is that validators are incentivized to participate in the block creation that we just discussed, because they receive monetary rewards in the form of coins for producing these new blocks. And you might have heard about this process is called mining and it's very popular. So now let's shift gears a little bit and talk about how Stellar approaches consensus. It's actually a completely different consensus mechanism because there are no computationally heavy tasks. + +[11:00] So instead nodes select other nodes on a network that they trust and they exchange messages with those nodes to agree on a set of transactions to confirm in a particular block, and we call these sets of trusted nodes quorum slices. So let's unpack what quorum slices actually mean. So to understand how consensus is reached on Stellar, let's first look at the examples of these quantum slices that we just discussed. So recall that a quorum slice is a set of nodes that a validator chooses to trust. On the right we have a network of four nodes: a, b, c and d, and we express trust relationship between these nodes with arrows. So, for example, a has b and c in its quorum slice and another example is node b: it has a, c and d in its quorum slice. So now that we know what quorum slices are, let's define quorum. + +[12:00] So quorum is a non empty set of nodes that contains a slice for each member. So, for example, a, b and c is not a quorum? Why? Well, because it actually does not contain a slice full slice for b and c. If you look at the diagram, so you see that d is in the quorum slice for both b and c, so it must be in the form. On the other hand, a, b, c and d is a quorum because it hand a, b, c and d is a quorum because it includes a slice for each member. So now that we know what quorum slices and forms are, the key detail of consensus on Stellar is that network. The network only confirms a block if a quorum agrees on that block, so it needs the agreement of a whole quorum to confirm the transactions. Great. So now that we've talked a little bit about consensus, let's switch to open participation. So what does it mean to have open participation? It means that there's no central + +[13:00] Authority allowing or preventing you from participating on the network, meaning that anyone can join and start validating transactions and participating in consensus. And we wanted to include this feature because it's just so vital for decentralization. As and as you can see, all three networks have that, but we thought it was important to include this feature as well. All right, cool. So next we have some key security properties to go over. So, as you can see, bitcoin and Ethereum prefer liveness and Stellar prefers safety, and you're probably very confused because I haven't defined either liveness or safety. So we're going to unpack this, so we're not going to spend too much time on the actual computer science theory here. But the most important point that I want to convey is that there are three desired properties that consensus mechanisms always want to have. So those are safety, liveness and full tolerance. What do they mean? So? 
Great. So now that we've talked a little bit about consensus, let's switch to open participation. What does it mean to have open participation? It means that there's no central + +[13:00] authority allowing or preventing you from participating on the network, meaning that anyone can join and start validating transactions and participating in consensus. We wanted to include this feature because it's just so vital for decentralization, and, as you can see, all three networks have it. All right, cool. So next we have some key security properties to go over. As you can see, Bitcoin and Ethereum prefer liveness and Stellar prefers safety - and you're probably very confused, because I haven't defined either liveness or safety, so we're going to unpack this. We're not going to spend too much time on the actual computer science theory here, but the most important point that I want to convey is that there are three desired properties that consensus mechanisms always want to have: safety, liveness and fault tolerance. What do they mean? Safety is a guarantee that all nodes produce the same valid + +[14:00] block, meaning that nodes can't produce contradicting blocks. Liveness guarantees that nodes will eventually produce a new block and won't just halt and be unresponsive. And, finally, fault tolerance ensures that the network can tolerate node failure: either nodes going down, or nodes actually becoming malicious and sending conflicting messages to other nodes. In distributed systems, it is proven that you can only have two out of three of these properties, and consensus protocols typically want to guarantee fault tolerance. Because consensus protocols select fault tolerance, they're left with deciding between safety and liveness. So what exactly happens when either safety or liveness is selected? If we choose safety, which guarantees that all nodes will produce the same block, nodes will get stuck until they + +[15:00] can all produce the same block. Remember, safety means that nodes can't produce differing blocks. On the other hand, if we choose liveness, nodes choose to produce a block - any block - meaning that they choose to stay alive, but they may produce a block that will later be overridden by a different block as the network advances in consensus. This potentially invalid block is actually what can open up the possibility for a double-spend attack, and we'll look at an example of this a little more closely in the next slide as well. So let's see a visual example of how liveness is picked over safety. Here, each clipboard icon basically represents a block, and as new blocks are propagated on the network, nodes can learn about them at different speeds, so it's actually possible for two nodes on the network to have a different view of the blockchain. Remember how nodes choose to follow + +[16:00] the longest chain in proof-of-work systems - but what if some nodes haven't learned about the longer chain yet? They continue to follow the chain that they currently consider the longest, and this is how you can end up with different views of the blockchain, as we try to show in this example. On the other hand, let's look at a network that prefers safety over liveness, which is what the Stellar Consensus Protocol does. The important thing to realize here is that the split that we had in proof-of-work systems can actually never happen, because the Stellar Consensus Protocol prefers, or chooses, safety over liveness. Instead, nodes hold if they can't agree on the same block: if there is no quorum that agrees on a particular block, nodes just hold and stop processing transactions. So we just covered some important security properties; let's actually see how long it takes to + +[17:00] confirm a transaction on these blockchains. Every Bitcoin block takes about 10 minutes, while an Ethereum block is about 13 seconds. Note that we also included the number of confirmations needed to consider a transaction final; this data is from Coinbase. Remember that in proof-of-work systems, nodes continue producing blocks, and as they discover newer blocks on the network, they might switch to a longer chain, potentially abandoning the previous chain they considered longest. Because of this, you typically want to wait for the network to confirm several blocks after your transaction made it into a block.
This is needed to reduce the chances of the network switching to another, longer chain, and so on Coinbase, as you see in the table, users wait for three confirmations for a Bitcoin transaction, and on + +[18:00] Ethereum it's 35 confirmations. This is completely different on Stellar: there's actually no need to wait, exactly because of the feature that we just described in the previous slide, where Stellar prefers safety over liveness. Because the agreement of the whole quorum is needed in order to produce a block, once a block is produced, operators can be sure that the block won't later be revoked. So once it's confirmed, it's confirmed, and on Stellar a block is produced every five seconds, and only one confirmation is needed to deem a transaction final, exactly for the reasons that we just described. Okay, so now let's also look at the average transaction cost. Again, we averaged the data over the past 12 months. As you can see, it's quite different across these networks, and I also wanted to point out that + +[19:00] these fees fluctuate: they depend a lot on how much competition there is for a particular transaction to be included in the block. But the most important point that I want to make here is the order-of-magnitude difference between Stellar and these other networks, despite the fact that these fees fluctuate. All right. So now let's talk about sustainability. As you can see, proof-of-work systems require high energy consumption, and energy consumption on Stellar is quite low. And why is that? Remember how we said that in proof-of-work systems, nodes try to solve a heavy computational task as fast as they can. This is directly related to energy impact: heavy computational tasks require a lot of CPU, which in turn requires a lot of power, and in proof-of-work systems validators are essentially incentivized + +[20:00] to have high-quality hardware that uses a lot of power in order to increase their chances of producing the next block. This is different on Stellar. Because Stellar does not require computationally intensive operations to reach consensus - instead it has nodes establish trust relationships with each other and just exchange messages to reach consensus - this can actually be done on very modest hardware, and it does not require a lot of power. All right, so we just talked about a lot of things, and the final feature that I want to talk about is resistance to 51 percent attacks. What are these attacks? Again, remember that in proof-of-work systems, nodes that solve a computationally heavy task get to produce the next block, so this is also directly related to how much computational power a validator has: with a lot of computational power, it is more likely to + +[21:00] win the block race and have the network accept that block. This means that if validators with over 50 percent of the computing power become malicious, they have the ability to take over the network and dictate every subsequent block. As an example, if the top few miners on the Bitcoin network colluded, or if a large government felt threatened by Bitcoin and invested a lot of resources into mining rigs, the network could be taken over. On Stellar, such an attack is not possible, because computing power is not a feature of consensus. On Stellar, nodes choose other nodes that they trust to decide which transactions to confirm in the block; nodes do not accept a block based on computational power.
This means that even if an attacker adds a million nodes to the network, this will not impact consensus, as long as nodes on the network do + +[22:00] not add those malicious nodes to their quorum slices. And usually nodes on Stellar add organizations that they trust to their quorum slices, so an attacker would need to convince other nodes on the network to add the attacker's nodes to their quorum slices, which can be difficult. All right, great. So, to summarize: the Stellar Consensus Protocol is a federated Byzantine agreement, which allows for open membership to support growing decentralization. It has low latency, which allows fast transaction times. It is resistant to malicious actors and 51 percent attacks, and it prefers safety over liveness to prevent forks. And I know I just said a bunch of things, so I'm going to stop here and see if there are any questions before giving it back to Justin. + +[23:00] Any takers? Any questions about consensus protocols, about the Stellar Consensus Protocol? I always think that I trip over "federated Byzantine agreement system" sometimes, and I'm like, oh, we should call it proof of vote or something. That's not a question. Shy students - if you have questions, sometimes they put them in the chat too. Marta - oh, yeah, right, we can do chat as well. I really encourage you guys to ask questions. Yes? Yeah, hi, I was wondering: when you're talking about security, or safety versus liveness - what's the benefit of liveness, if the speed for transactions is still faster on Stellar? Why would anyone choose to value liveness? I think it's just that those consensus protocols are completely different. So on Stellar, for + +[24:00] example, yeah, you prefer safety, but then you have to also select your quorum set correctly and make sure that it's actually configured correctly, which can sometimes be difficult. And on these proof-of-work systems, I think that's just a feature of the consensus, because you can basically have multiple chains at the same time, and because it's a distributed system and these computers need to learn about these different chains, on a proof-of-work system they can have a partial view of the network at a time. Well, you know, one thing I sometimes think about with Stellar is that the validators on the network are known entities; as Marta pointed out, they have to trust one another. They have to opt into adding entities to their quorum set; they know who those people are. And so when you value safety, right, if the network + +[25:00] halts, what that means is that - remember, it's a decentralized network - nobody has the ability to turn it off, and no one can just turn it back on either. Right? You actually have to coordinate validators if a network halts. And so I almost think that one necessity for a network that does value safety over liveness is that validators have to be able to find one another, right, to restart the network if something goes wrong. And I think on a lot of networks that are proof of work, where miners are basically anonymous, I don't even know if that would be possible.
Yeah, and I think, because Stellar was designed for payments, it was really important that if something happens - like some kind of split on the network - everybody just stops. There's no forking and continuing into different views of the world; if nobody can agree on one view of the world, everybody just holds. So it's kind + +[26:00] of an important feature, given how the network is used, you know, for payments, for cross-border payments, and for assets. Thank you, Marta, that was awesome. All right, I'm picking it up, and I'm talking about open-source contribution. So all of what Marta talked about is super awesome: there is a codebase that all these nodes run that allows them to talk to one another, to vote, to come to consensus, to ratify transactions. And a fundamental ethos behind the development of that codebase - and it's part of what makes me love SDF - is that it's open source, right? Anyone can review the code, anyone can contribute to the code, no one owns the code. It's open, it's free, it's participatory. You, listening, can join the good fight + +[27:00] today. You can work on the code; all you need is a GitHub account. There's a wide world of innovative Stellar developers, and while many of them are focused on building their own apps and services that make use of the network - the application layer, which we'll talk about in a second - many are also contributing to helping evolve the software that powers the network itself. And that software never sits still: it's constantly being iterated on, improved and worked on in order to fulfill the needs of the ecosystem, including all the projects that are built at the application level. So here's a question: if you have this codebase, and it's evolving, and no one owns it - well, how does it evolve? I'm going to talk about that quickly. Two main ways: Core Advancement Proposals and Stellar Ecosystem Proposals. Bear with me here. Core Advancement Proposals: CAPs. This is a method that we sort of came up with at the foundation - actually, I don't even know who started it - but at this point it is a codified process to allow contributions to come in and amend and update the code that runs Stellar Core, + +[28:00] which Marta talked about. So Core Advancement Proposals are suggested changes to the protocol of Stellar; they have a direct effect on how the network operates. There are about 40-plus of them. Each one outlines a specific change, and they're used to add new features to the network itself. A recent example is one called fee-bump transactions, which allows fees to be paid by any account on the network, and it enables an application to cover its users' transaction fees. Now, network fees are handled at the protocol level, so this new feature required a new operation to be built into the protocol; that's why it had to be implemented in Stellar Core, and that's why a Core Advancement Proposal had to be done for that one. Incidentally, we just added automated market-making functionality to Stellar Core - CAP-38 - two weeks ago. I'm not gonna get into it right now, but it was super cool.
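As a hedged illustration of that fee-bump feature with the JavaScript stellar-sdk: the sponsor secret is a placeholder, `innerTx` is assumed to be a user's already-signed Transaction object, and depending on SDK version the fee source is passed as an address string or a Keypair:

```js
const StellarSdk = require("stellar-sdk");

const sponsor = StellarSdk.Keypair.fromSecret("S...APP_ACCOUNT_SECRET..."); // placeholder

// Wrap a user's signed transaction so the application account pays its fee
function coverUserFee(innerTx) {
  const feeBump = StellarSdk.TransactionBuilder.buildFeeBumpTransaction(
    sponsor.publicKey(),          // fee source: the account covering the fee
    "200",                        // base fee the sponsor is willing to pay, in stroops
    innerTx,                      // the inner, already-signed transaction
    StellarSdk.Networks.TESTNET,
  );
  feeBump.sign(sponsor); // only the sponsor signs the outer envelope
  return feeBump;        // ready for server.submitTransaction(feeBump)
}
```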
For CAP to be qualified, classified as final and go live on the network, it has to go through all these stages, has to be implemented in the code but then, fundamentally, before it actually hits the network, it has to be accepted by a majority of the network. Validators. Vote on the protocol number. The network runs using the same Stellar consensus protocol they use to ratify transactions. So, governance wise, anyone can contribute to code, can be implemented, but before it is, actually adopted the network has to give it the thumbs up. Now that's different. That count process is different than the other kinds of standards that live outside of the Stellar code, Stellar eco system proposals, SEPs for short. These deal with changes to the standards and methods used to build on top of the network. Again, there are 40 plus of these for various purposes. They're labeled by numbers. These don't alter the Stellar code, but they are technical blueprints that allow developers to agree on how services using the network should be implemented to allow for maximum interoperability. So basically, a lot of the times these + +[30:00] Say: here's how to set up an extra network api. There they detail, like sort of two sides of an interaction: server side, how to create the api, the client side, how to consume the api. Again, they also follow a life cycle from draft to final. I'm going to give you an example. so this is less abstract. SEP 31 is a standard that allows businesses to make compliant cross border payments on behalf of two users. So imagine a user wants to make a traditional remittance payment, right, they log into admittance providers website, they provide funds and they enter information about the recipient and they want to have the money just show up in the recipients account. They don't even know they're using Stellar, but they're working with companies that have integrated Stellar on the back end. So to complete the, transaction and comply with regulations, the remittance provider, they actually need to know the recipient's bank account and routing number and they need to collect KYC information about the recipient and to do all that they're receiving and they need to share it with the receiving party. Right, so + +[31:00] There isn't a direct path between two yet user Stellar account on network and so instead the anchors will create, use a SEP to create these extra network apis that allow them to share KYC information. Okay, that was sort of a lot. Again, that was the code. Marta talked about, the actual consensus protocol. Those two things- SEPs and CAPs- were the ways that the code evolves. But in truth, interacting with Stellar is actually quite simple and because we're actually going to move way up the stack now. So most developers who are building on Stellar, they aren't necessarily working CAPs or SEPs, they aren't necessarily thinking about the protocol level or interacting with Stellar Core in the raw. Every day they're using an SDK, a software development kit, in their preferred language, and those SDKs are interacting with a network api called Horizon. So this three tiered stack of responsibilities, it sort of divides things so that each piece of software + +[32:00] Can focus on its specific purpose right. Stellar Core concentrates on transaction submission and consensus. Horizon provides an ergonomic interface to allow people to submit, to actually interact with the network, and SDKs kind of abstract away the complexity in a variety of different languages. 
So when you're developing on Stellar, you can for the most part kind of not think about what's going on under the hood, at least not all the time, and you can just use an intuitive REST API to submit transactions and query network data. It's actually pretty cool. But again, that feels a bit abstract, so let's make it concrete. We actually have a thing called the Stellar Laboratory. This is a handy tool that you can use to build, sign, and submit transactions to the network, and you can use it to query all the API endpoints and to understand what that means and how to use it. We're going to do something quickly right now, I hope this works. We're going to issue an asset, and we're going to do it on the testnet, which is the development sandbox for Stellar. It's just like the main public network, but it's not connected to

[33:00] Real-world money. So this is the process for issuing an asset on the testnet; it would be the same for the real network. Quick overview: issuing an asset requires a number of steps, and the basic way to do this, which is covered in the developer docs, the easiest way to do this, is to create two accounts: one is an issuing account and one is a distribution account. Because in order to issue an asset on Stellar, one account has to trust the other to give it the asset, and then that account has to make the payment. This might seem weird, but you'll see what it looks like in action. And so I'm going to now open Stellar Laboratory. Let's see, are you seeing Stellar Laboratory? Okay, this is Stellar Laboratory, and, as you can see, it's on the test network. I first want to create two new Stellar accounts. So this is what we will call the issuing account.

[34:00] It's really crazy. And then, because in real life you would need to actually fund this: every Stellar account requires a minimum balance of lumens in order to exist on the ledger, to prevent spam. In real life, you'd have to fund it with real lumens. But again, we're on the test network, so I can just get test network lumens. Now this account has its minimum balance. That is my issuing account. I'm generating a second account; this would be the distribution account. Okay, saving these in a handy spreadsheet. In reality, you would be very careful with these keys. And again, I've generated that key pair, and I need to fund it. And so, again, this is Stellar Laboratory, and what I've just done is created two new accounts. But, as you can see up here at the top, there are all these different tabs that you can use to actually understand what's going on on the Stellar network: you can actually build, sign, and submit transactions and explore endpoints. So, to start with, we're just going to

[35:00] Build a couple of transactions. And what we're essentially doing is we're taking that second account that we created, which is the distribution account, and I'm adding it here. I say: fetch the next sequence number. I set the fee. I'm not going to worry about time bounds. But this is me constructing a transaction, and the operation type is going to be change trust. So, to change trust, I'm going to basically believe in an asset that exists, called "you shy," that is actually issued by the issuing account. So again, what I'm doing here is, with the distribution account, I'm trusting the issuer account for an asset called "you shy." I'm basically going to see that I have generated the right transaction envelope, and I'm going to sign this in the transaction signer. Let's hope this works. So for that I need the secret key, if you recall.
I need the secret key of the account,

[36:00] And then I can submit the transaction in the transactions tab. This takes three to five seconds. As you can see, I submitted the transaction and the transaction is complete. Again, you can use this interface to create any kind of transaction. I'm going to do one more transaction really quickly, which is basically, with the issuing account, I'm going to make a payment. So I'm putting in the issuing account now, I'm fetching the next sequence number, I'm going to create a payment operation, this always makes me nervous, and I'm going to send it to that distribution account. Here is the distribution account.

[37:00] I'm going to send, I don't know, 13. Not many. Again, I can sign this with the secret key for the issuing account. So again, this all feels a bit strange, but I'm going to wrap it all together nicely in the end. And once I sign it, I can submit the transaction. Let's hope it works. Transaction submitted, okay. So what I just did was I basically created two accounts. One is an issuing account that actually creates and sends the asset, and one is a distribution account. The distribution account essentially linked to the issuing account and said: I trust you to send me an asset. And the issuing account made a payment to the distribution account, and now that account will hold a balance of the asset. And that was basically it: we just issued an asset on Stellar. You can issue any asset that represents anything, with any asset

[38:00] Code, and it's just as simple as that. Those are just a few things that you can do in this interface, or you can do it with an SDK. Now, one final thing: as I mentioned, Horizon is that API that has different endpoints, and this interface, the Stellar Laboratory, will actually show you how to construct various queries to the Horizon endpoints. So right now, if I go to Horizon and I sort of query the distribution account, what it should have is a balance of this asset that we just created. Oops. So I'm going to put it here, and you will see, in fact, if I scroll down to the JSON response, that it does have a balance of this "you shy" asset. So what I essentially did was issue into this account a new asset that never existed before on the ledger. Now, part of what's cool is that this actually happened on a test network that replicates the real network, and all of

[39:00] That payment, all that issuance, was actually done on multiple nodes. And now I'm going to open just, like, a totally different thing. This is StellarExpert, a block explorer for the Stellar network. It's not something that we built, it's not something we maintain. It's actually maintained by a company in Ukraine, right, and this is basically a way to view actions on the Stellar network. It's set up to view the test network, so I can actually go to a totally different third-party interface, I can look up the key, right, and I can see that, oh, look, there is the public key, there's the asset that it issued, there's its balance, here's the transaction history, here's what we just did. And this is sort of the way that it fundamentally works: using this API, you can make payments, issue assets, and all of that activity is actually done on this sort of common blockchain that you can view from almost any other source.

[40:00] And so that is asset issuance on Stellar. Now I accidentally stopped sharing my presentation, sorry. Okay, is there more to do here?
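For reference, the same walkthrough as a minimal code sketch using the JavaScript SDK on testnet. The asset code `DEMO` stands in for the one used in the demo, and a real issuer would handle keys far more carefully than generating them inline.

```js
// Minimal sketch of the Laboratory demo: create two accounts, fund them via
// friendbot, have the distributor trust the issuer, then issue via payment.
// Requires Node 18+ for global fetch. Asset code "DEMO" is a stand-in.
const StellarSdk = require("stellar-sdk");

const server = new StellarSdk.Server("https://horizon-testnet.stellar.org");
const opts = {
  fee: StellarSdk.BASE_FEE,
  networkPassphrase: StellarSdk.Networks.TESTNET,
};

async function issueAsset() {
  // 1. Generate the two key pairs: issuing account and distribution account.
  const issuer = StellarSdk.Keypair.random();
  const distributor = StellarSdk.Keypair.random();

  // 2. Fund both with testnet lumens (covers the minimum balance).
  for (const kp of [issuer, distributor]) {
    await fetch(`https://friendbot.stellar.org?addr=${kp.publicKey()}`);
  }

  const asset = new StellarSdk.Asset("DEMO", issuer.publicKey());

  // 3. Distribution account trusts the issuer for the asset (change_trust).
  const distAccount = await server.loadAccount(distributor.publicKey());
  const trustTx = new StellarSdk.TransactionBuilder(distAccount, opts)
    .addOperation(StellarSdk.Operation.changeTrust({ asset }))
    .setTimeout(30)
    .build();
  trustTx.sign(distributor);
  await server.submitTransaction(trustTx);

  // 4. Issuer pays the asset to the distributor, which is what issues it.
  const issuerAccount = await server.loadAccount(issuer.publicKey());
  const payTx = new StellarSdk.TransactionBuilder(issuerAccount, opts)
    .addOperation(
      StellarSdk.Operation.payment({
        destination: distributor.publicKey(),
        asset,
        amount: "13",
      }),
    )
    .setTimeout(30)
    .build();
  payTx.sign(issuer);
  await server.submitTransaction(payTx);

  // 5. Query Horizon for the distributor's balances, as in the demo.
  const updated = await server.loadAccount(distributor.publicKey());
  console.log(updated.balances);
}

issueAsset().catch(console.error);
```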
And so there you go: those features, right, the ability to issue assets, allow you to build an interoperable world, because all kinds of companies, or single devs, can issue assets and build interfaces that connect to the blockchain. And it is very easy to get started, right: Stellar, again, supports common development languages, JavaScript and Python; there's extensive documentation; developers can build and operate on the network with very few resources; and there is this test network. And we have great documentation, which you can find at developers.stellar.org.

[41:00] So for anyone who is sort of here and wants to build on Stellar, you can get started immediately. I guess the final thing that I do want to say, oh no, there's way more. The next thing that I want to say is that, also, if you're interested in learning more about Stellar and doing some hands-on stuff with that Laboratory, I would suggest that you go to `quest.stellar.org`, and, after signing up with Discord and your Albedo wallet, you can basically compete in quests to win an NFT badge. Several times a year there are live series where you can compete. This is basically a gamified intro to Stellar that lets you learn. If you're going to do it, I'd recommend starting with the practice quests: start with quest one, and you'll need to create an account, like I just did, and then it will sort of walk you through the processes and the resources that you need to complete the quest. You'll get a set of keys, and once you submit the right transactions, and you can use that Stellar Laboratory interface, you can basically verify your solution and claim an NFT. It's super fun, even if you don't know how to code, because you can use that interface, and if you run into issues, you

[42:00] Can always ask questions in our vibrant community; we have a Discord server full of Stellar Questers. And actually, that was it. So if you want to find out more about Stellar, go to `stellar.org`. Again, I recommend using Stellar Quest to keep learning. And that was a fairly deep dive. So again: we looked at the Stellar stack, we looked at basically the interface and how you can issue an asset, and then, finally, Stellar Quest, the gamified way to learn more about Stellar. And now we will open it up for questions.
diff --git a/meetings/2022-02-03.mdx b/meetings/2022-02-03.mdx new file mode 100644 index 0000000000..7c8fda6a43 --- /dev/null +++ b/meetings/2022-02-03.mdx @@ -0,0 +1,174 @@ +--- +title: "SPEEDEX Lane Operations and Observability" +description: "This discussion examined CAP-42 and the idea of multi-part transaction sets as a way to segment ledger capacity into lanes with independent fee dynamics. The session explored how transaction grouping could protect baseline payments from arbitrage-driven congestion while preparing the protocol for future engines like SPEEDEX and smart contracts." +authors: + - david-mazieres + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [legacy, CAP-42] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +The session focused on the operational and protocol implications of CAP-42, which proposes multi-part transaction sets to allow different categories of transactions to compete on fees independently. The core motivation was mitigating arbitrage-driven fee spikes and high failure rates that degrade user-facing use cases like payments and simple transfers. + +Participants debated how lane-based transaction grouping could evolve Stellar’s fee model without breaking existing expectations, and how much discretion validators should have in setting fees or execution order. The conversation also connected CAP-42 to longer-term goals, including SPEEDEX integration, multi-phase block execution, and improved observability and tooling for operators. + +### Key Topics + +- Motivation for CAP-42 + - Arbitrage spikes crowding out payments and increasing failure rates + - Need to isolate fee competition without raising global base fees +- Multi-part transaction sets + - Grouping transactions into independent “lanes” + - Allowing per-group fee dynamics and capacity limits + - Preventing one class of activity from dominating the ledger +- Validator discretion and fee models + - Limits of the current Dutch auction mechanism + - Debate over per-group vs per-transaction fee control + - Trade-offs between transparency, simplicity, and flexibility +- SPEEDEX and future execution models + - Dedicated lanes for SPEEDEX-style trades + - Preparing for multi-phase or parallel execution + - Avoiding intermingling of incompatible transaction types +- Operational concerns + - Guard rails for lane capacity and validator configuration + - Detecting and isolating repeated failing transactions + - Telemetry, logging, and observability requirements +- Developer and ecosystem impact + - How wallets and Horizon would surface lane behavior + - Predictability of fees for users and applications + - Documentation needs as fee policies become more nuanced + +### Resources + +- [CAP-42: Multi-part Transaction Sets](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) + +
Video Transcript

[00:00] Hello and welcome to the Stellar Open Protocol Discussion. In these meetings we discuss Core Advancement Proposals, aka CAPs, which are technical specs that suggest changes to the Stellar protocol. They're necessary to allow the protocol to continue to evolve to meet the needs of the ecosystem. We live stream these meetings so that anyone who's interested can follow along, although I do want to note it is a technical discussion, so if you're watching, you may want to take a look at the CAP linked to in the show description. In the description, all the participants are listed as well. We do keep an eye on the dialogue that's happening in the chat box, but the goal really is to talk through questions about the CAP that we're discussing, which is [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md), so we will only really address questions if they're directly related to that goal. But we will collect them and take a look at them later, too. Today the CAP we're discussing is [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md), titled Multi-part Transaction Sets. It suggests a mechanism that makes it possible to implement different fee

[01:00] Structures based on transaction set composition. Currently, transaction set composition can sometimes be heavily biased towards undesirable behaviors, including spikes of arbitrage trades that eventually fail. This CAP basically empowers validators to isolate subsets of transactions that can compete on fees without causing overall fees to increase, and it also makes it possible to expand policies to other dimensions in the future. So, [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md). Again, it's linked to; [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) is in Draft status, and that's the link to it there. It was also discussed on the mailing list, and there's a link to that discussion thread as well. Our goal today is really just to see if there are any blockers that would prevent us from moving it forward to the next step of its life cycle. And I will stop talking, and I'm going to turn it over to Nico, who is the original author of this CAP, to see if he has any questions that he wants addressed by this meeting. Nico, take it away. Hi,

[02:00] You can hear me? All right. So, yeah, I'm happy that I got to get [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md), because, you know, it is the answer to everything, of course. And in particular the problem with fees, and how they've been playing out on the network in some ways that we're not necessarily happy with. And when I say we here, it's the community, which has also been complaining quite a bit about the number of failed transactions, or patterns that are just not the type of patterns we want to see on the network. So, anyways, maybe a little additional background that is not in the CAP itself (it's actually on the dev mailing list, but not in the CAP) is that, as part of this,

[03:00] We actually had to roll out some changes to overlay back in December that drop transactions based on a heuristic, transactions that we think are likely to be arbitrage trades, and which mitigated to a certain extent some of the things that we were seeing at the time.
But that has a lot of undesirable properties, and I think, as part of [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md), my hope is that we can actually go and fix some of those aspects that we introduced at the time. So, in particular, the type of aspect I'm looking forward to fixing here, and this is actually true in all networks, it's not actually a Stellar-specific type of problem; there have been discussions in the past

[04:00] Few months on Ethereum and Solana, to name a couple, on changing fee structures. When it comes to this kind of spam, and when I say spam here, I mean, it's kind of a funny thing, but it's basically a natural thing that happens, I guess, when you have an opportunity for somebody to make money based on the cost that you have to pay to submit transactions. If you can make, let's say, a thousand dollars on a trade, well, you'd be okay actually to pay 900 dollars in fees, because you still make some money. Obviously, this is

[05:00] The kind of situation you can end up with. And at the same time, if all the transactions on the network are kind of competing on those kinds of fees, then it kind of kills off a whole category of other use cases that you want the network to have: so, for example, payments, or simple airdrops, or whatever people want to do that is actually unrelated to this type of trading activity. So, anyways, that's the background on why this CAP. And yeah, I don't know if people had time to look over in more detail the refreshed CAP that I did yesterday.

[06:00] I did it yesterday; it has actually two parts to it. One part is extending what is attached to a transaction set, basically so that we can add new properties to the transaction set. And the second part is the actual reason for this CAP, which is changing how fees get calculated within a transaction set. Sorry, is the latest version the one that's in the master branch in the repo, 22 hours ago? Okay, yeah, that's it. Yeah, and I'm sure there are typos, because I kind of updated it quickly yesterday to make sure that people had time to have access to it, but it's an early draft. Tomer, did you raise a hand? Yes, I did. Otherwise I tried to be polite, but Nico didn't see.

[07:00] First of all, I think this is pretty cool, because it takes advantage of something that's unique to the ecosystem, and, like, we can actually prioritize transactions not only based on fees. This is very different in other ecosystems, because, you know, validators are kind of incentivized by transaction rewards, so they don't have the motivation to actually push these kinds of changes. It's pretty cool. Nico, I'd like to hear your take on, we've been talking a lot about strategy for 2022 recently, and things like SPEEDEX and things like smart contracts in the future. These are, you know, more transaction engines, or like transactors. How will [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) interact with these, or how do you think it'll interact with these features? Yeah, that's a good question, and I think this is actually in line with, when I wrote it, I thought that we would actually, over time, kind of create more fee lanes, for lack of a better word.
So an example of

[08:00] A fee lane that I think would make complete sense is to have a separate lane for SPEEDEX trades, given that they're a lot more efficient compared to existing DEX operations. And yeah, we'd basically say, okay, if you have the choice between a SPEEDEX trade and submitting a manage offer to the network, you're going to have more space, basically, on that ledger. And for smart contracts, I mean, it gets, I think, a little more complicated. When we talk about smart contracts, a very primitive version of this would be: well, we are going to maybe create a lane just for smart contracts. That would be a simple first step. Obviously there might be more interesting things that can be done

[09:00] On that front, but at the minimum, you would create a different kind of fee regime, right, for smart contracts. I mean, I guess the thing is, it seems not really optimal for those use cases. Like, in those use cases it would be much better to just partition the transactions, because then you could, for example, say that all the smart contracts get executed after all the regular operations, right. So just kind of having these intermingled, like this index set, just seems like it's gonna add a lot of overhead and not be, you know, what you would want. So I kind of have two reservations about this, right. One is that it just seems a little bit too complicated for what we really need, especially if you want to start having, you know, blocks with, like, 100,000 transactions, for example, which would be totally doable with

[10:00] Something like SPEEDEX if they were all SPEEDEX transactions, right. You're not going to want to also have the added overhead of sanity checking these index checks and whatever. You're just going to want to blast through your array of SPEEDEX trades and do a parallel execution of those. And I guess the second thing that bugs me a little bit about this: it just seems very indirect, right. Either we believe that Dutch auctions are the right thing or we don't, right. And maybe Dutch auctions aren't the right thing, but what we're sort of trying to say is, well, we're sort of going to keep the Dutch auction, but we're sort of not, because we're going to let validators really kind of pick and choose different sets of transactions that are going to pay different fees. And I think the reason is because we want to filter out spam. So, like, that is a form of censorship. Maybe it's okay, but maybe we should just be a little bit more direct and say, we're gonna just have a way for different validators to filter out certain

[11:00] Transactions that we don't think are productive. And to the extent that you might not want that, you might want to say, well, what if my transaction is really important? Then I should be able to pay a high fee and get it in. The problem is that this doesn't actually solve that problem, right. Because I could put a maximum fee of, you know, 2^32 lumens in my transaction, but a validator who wants to censor me could put me in my own group with a very low base fee and then put everybody else in a group with a higher base fee, and it'll look like, you know, the block with that transaction will not compare favorably, potentially, to other blocks. Or, I mean, whatever, you could just filter out the transaction anyway.
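For readers following along, a rough illustration of the lane idea under discussion. This is illustrative JavaScript only, not the encoding the CAP actually specifies; the lane names, fees, and limits are invented.

```js
// Illustrative only: a "multi-part" transaction set where each group (lane)
// carries its own base fee and capacity, so fee competition in the arbitrage
// lane can't push up fees for plain payments. Not the real CAP-42 structures.
const txSet = {
  previousLedgerHash: "<hash of the previous ledger>", // placeholder
  groups: [
    { name: "payments", baseFee: 100, maxTxs: 500, txs: [] }, // benign ops
    { name: "dex", baseFee: 25000, maxTxs: 200, txs: [] }, // offer-crossing ops
  ],
};

// A validator-side classifier might route transactions into lanes like this:
function laneFor(tx) {
  const dexOps = new Set([
    "manageBuyOffer",
    "manageSellOffer",
    "pathPaymentStrictSend",
    "pathPaymentStrictReceive",
  ]);
  return tx.operations.some((op) => dexOps.has(op.type)) ? "dex" : "payments";
}
```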
So I just, I don't see. It seems like this is partly to solve a problem, but also partly to, I don't know, appear to be preserving the Dutch auction when we're

[12:00] Not. And if we're going to have some extensible way for validators to filter out transactions, why don't we just do that in a more straightforward way? Well, so, first of all, when you say we're not preserving the auction mechanism here, what I'm saying is that, when validators nominate those transaction sets, the expectation is that they would actually be doing this type of auction in any given group. But if they can choose the groups, you might as well just let validators choose the fee for every transaction. It's the same thing. Yeah, you could do that straightforward thing: just annotate each transaction with the fee, like, you know, a 32-bit number. What was on the mailing list actually was to just annotate every transaction with the fee. Yeah, but that's actually even less transparent in terms of what

[13:00] The intent is. So that's why I didn't do that. But that's effectively what you're doing. Yeah, no, I know, I'm basically allowing for this, but I don't want people to be doing arbitrary fee structures per transaction. But that's what you're enabling, right? I mean, I know it's being enabled, but that's not what we expect validators to do. And actually, there's a discussion later in the document that talks about this, because, basically, today, validators can already censor quite a bit what gets into a transaction set. So the example of following the fee structure today is already not exactly right: if a validator doesn't want to do it, they just don't do it.

[14:00] And yeah, here, what I'm trying to promote is more of a compact, I guess. The, those indexes thing, I agree, could be done maybe in a slightly different way. I was trying to preserve the transaction set itself, like the existing transaction sets. But that's not good either. If it turns out that we want to actually change the order in which we execute transactions, then preserving the transaction set isn't actually that useful, right? We already, so, for SPEEDEX, what is in the current proposal is to just, we partition on type, and we actually keep the transaction set as it is, like, from a serialized version, because we can always partition after the fact. Yeah, but it just seems more useful to have it partitioned ahead of time. If you have sort of totally different phases of block

[15:00] Execution, I see no intermingling of transactions from the different phases. That's true. So, I mean, I guess what I'm saying is, this will get the job done, but I can think of sort of much simpler ways of getting the job done. And it seems like this CAP is too concerned with sort of appearances, and those appearances, you know, effectively don't matter when what you're doing is equivalent to something where whatever validator wins nomination can just literally set the fee for every transaction. So, like, if we're going to give validators that power, let's just be upfront about it.
I mean, I thought it was pretty upfront. I was optimizing, again, for ease of audit and, like, a more compact, potentially, format. But that's true, it could be more compact or

[16:00] It could be less compact, depending on how many. If you have an extension point per transaction, you at least have an additional integer per transaction. Well, you could just change the transaction set format and add, like, a four-byte fee, you know. No, but then it's less expressive. Like, David, can you articulate your alternative for a second, so that we're all on the same page? But the thing is, what we are discussing here is more like the format of the transaction set. We're actually not discussing, and maybe we should start with that, the premise of allowing validators to control fees, right? Because I think that's the important one. I mean, I guess I don't have a particular alternative proposal, because it seems like we're trying to do two different things, and those might be better

[17:00] Served by two different mechanisms, each of which is maybe less complicated or more efficient to implement than this, right? So the first thing that we want is kind of multi-phase block execution down the line, so we'd like to lay the groundwork for things like SPEEDEX and smart contracts and stuff to coexist with the legacy Stellar transaction format. And then the second thing that we want is to weed out spam, or, you know, transactions that are gonna fail a lot. And so I just think, you know, these could be handled in two separate ways. If we wanna have multi-phase blocks, let's just have multiple phases in the blocks, and potentially have, like, fees per phase. If we want to weed out spam, then let's just, you know, have a way to weed out spam, and, regardless of fees, we can just weed out the transactions.

[18:00] Weeding out spam without doing it via fees is kind of what we're doing now, and it's actually very hard to keep up with whatever people do; like, you know, people are very clever, right, they will find better ways to get around it. What power does this give us to weed out spam that we don't have in the existing consensus layer? In other words, I agree that we want to be able to kind of unilaterally update Stellar Core, without going through the transaction layer, or eventually have a configurable algorithm to weed out spam, potentially, and maybe different validators have different mechanisms. But I don't see how this change to the consensus layer actually gives us more power to do that. It may give us some sort of plausible deniability or something. It actually gives quite a bit of new superpowers to the network, because,

[19:00] Like I was saying earlier, if I have an arbitrage trade where I can make a thousand dollars, ideally, I would want people to actually pay up to a thousand dollars in fees if they are really serious about their transaction. Right now, they don't. That's not what happens, because they're actually competing with the entire pool of transactions, and they are filling up entire ledgers, and then the ledger fees go up to, okay, 20 bucks, you know, for a transaction. That's not a thousand dollars, but that's still ridiculously high. And 20 bucks is not the number, I think.
It was, like, what we saw was close to five dollars, I think, per transaction. So your point is, you want the spam, you just don't want it to be as profitable? Like, if someone can do this. Yeah, that's kind of the idea here. In the current proposal, I'm saying, well,

[20:00] Any DEX, any operation that may cross an offer, is going to be put in a separate bin, and then, on that bin, we do this auction system. And then, if you're not interacting with the DEX, then you're in the, you know, general group. So, you know, that's like a first step. We can probably do much better than this, but, you know. So why don't we just throw in phases and say that, you know, that's what I'm saying, that's already what I'm saying. Sorry, I'm saying, instead of indexes, let's just have straight-up phases and let's just execute them in, you know, just such that the DEX operations, oh, so you would want the execution order to change as well. I mean, yeah, that's fine. We want to, like, segregating is going to lay the groundwork for parallelization, right. And so, if we're going to do a change like this for segregating

[21:00] Operations, eventually we're going to want sort of the commutative operations in one group and the non-commutative in another, and the legacy DEX operations are exactly the ones that are never going to be able to be commutative. Yeah, but I think the issue here, if we have groups that somehow impact apply order, because we can have transactions with multiple operations, I think you can basically use that to your advantage to maybe get executed in front of somebody else, just because you happen to have the right operation. No, because people will say, we'll flag certain operations, like path payment and, you know, manage offer and whatever, and say that, you know, there's a first phase, which might have a lower fee, but in order to get into the first phase, there are certain operations that are off limits, like the ones that we can't necessarily make efficient. And then, in the second phase, you can, you know, have arbitrary operations. My point is that

[22:00] This forces you to think about how those fee lanes, or whatever, have to be somehow completely independent or something, and I'm not sure. You can always claim that. Not necessarily, you could have, you know, I can see, for SPEEDEX, things like that, yes, that makes total sense. For the existing transaction subsystem, I don't know. Well, I think we're going to want to have fast payments, and faster payments than we can do orderbook operations. So, just, you know, if we're going to be making this change, why not make it in a way that will kind of facilitate that future where maybe, you know, payment processing could be parallelized, even if orderbook processing can't be?

[23:00] It's more like, I think of this as, it may be a multi-dimensional thing of fees, and then, how do you turn that into something that makes sense from an ordering point of view, right? That's kind of, I think, I'd leave it as open-ended as the current draft. You could just say that we're going to do that with our groups.
Sorry, I think, if you say there are groups, if you're linking the groups to execution, or, yeah, so you have the apply order of groups, yeah, and so each group has a different apply order and a different fee structure, and then we can kind of, you know, have different rules for different groups. If you want to execute early and fast, then, you know, you should have a transaction

[24:00] That goes in one group. I think we could also, I mean, it might be worth considering, if we're changing the fee structure anyway, to have a separate base fee for successful versus failed transactions. Right, no, that is wrong. Like, that doesn't work. It makes things so complicated in the existing transaction subsystem; we've talked about that one a couple of times before. Yeah, that would be pretty tricky. It's super complicated, because you can merge accounts; that's kind of one of the reasons. For example, one of the things that can happen is you have to refund people at the end. Yeah, it is kind of crazy stuff, and this actually doesn't solve the, it's not that we want to penalize transactions. You're going to have buckets. The buckets could, they could give you more granularity on the fee, but that's fine. I believe you, that would be too hard.

[25:00] For me, the most interesting question was your first question, David, which was just: do we still believe in the Dutch auction? Let's just start with that. Do we think that it even works? Do we think that it is a good system for blockchains? Because we thought that, what, in 2018? Well, maybe 2018 us was wrong. Right, like, I wouldn't deny that 2018 me was wrong about a whole bunch of stuff. Yeah, but the thing is, even if you go back to the regular fees, it doesn't solve the problem that people are still going to bid a stupid number and take over the ledger. Agreed, but, like, if we don't believe in the Dutch auction thing, one of the things that I mentioned in my email was, instead of assigning a base fee to different groups, we could just say, hey, if you're in certain groups, you actually pay your bid. So, instead of being like a Dutch auction, now you become like a first-price

[26:00] Auction, basically. And then that's really punitive, basically, to people who choose high fee bids, but then, if you choose those high fee bids, you are guaranteed to pay them, basically. So it avoids that situation where it's like: oh, there's a thousand-dollar opportunity, you bid 900 dollars, but you end up paying 20. It's like, no. I mean, transactions, right, so this is one thing that's changed now. One of the reasons why you might want to have bid a fairly high amount in 2018 is because you were pre-signing a transaction, right, and now we have a solution to that. It's not just that, but also, for clients, it really avoids having to keep track of market activity, and especially if you start to say, okay, predicting fees is basically becoming even harder, I think you have to have this kind of mechanism. Well, no, but I mean, what this

[27:00] Current mechanism is, basically, you say the maximum fee, and then whichever validator wins nomination decides how much you actually pay, right?
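For readers following along, a tiny illustration of the two pricing rules being contrasted here. Real Stellar fees are bid per operation in stroops and the selection logic lives in stellar-core; these functions only sketch the shape of the rules.

```js
// Simplified pricing rules for one group of included transactions.
// `bids` are the max-fee bids the validator chose to include.

// Dutch-auction style (roughly today's model): everyone pays the same
// effective fee, set by the lowest bid that still made it into the set.
function dutchAuction(bids) {
  const clearingFee = Math.min(...bids);
  return bids.map(() => clearingFee);
}

// First-price style (the alternative floated above): you pay what you bid,
// which makes wildly high bids genuinely expensive.
function firstPrice(bids) {
  return bids.slice();
}

console.log(dutchAuction([900, 500, 100])); // [100, 100, 100]
console.log(firstPrice([900, 500, 100]));   // [900, 500, 100]
```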
Like, that's what's going on. But it sort of, right, looks like a Dutch auction, kind of, because there are these groups where you're running the Dutch auction. But if I can kind of pull people into different groups, who are gonna end up paying different amounts for the same good, then it's like, I might as well just set the price. It just seems like it's trying to look like it's still a Dutch auction when it's not. So let's just say more directly what it is, which is validators setting fees per transaction. But it's like saying, oh yeah, because validators may decide to not do what we want people to do, then let's throw away the whole thing, right, and make things as complicated as possible. No, I'm just saying, figure out what we want validators to do and just directly give them that power.

[28:00] Instead of, no, but that's fine; that's the point of this CAP, is we give them that power. But then you're saying we also get rid of the bidding aspect. Well, validators could decide to run a Dutch auction, right? The point is, if we're giving validators the discretion to set fees, then one of the things they could do is run a Dutch auction. But there's no reason to still have CAP-5 somehow wired into the consensus layer. CAP-5 is not going to be true anymore. Yeah, exactly. That's actually what [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) already says: there's no more surge pricing. We're running these Dutch auctions per group, which just seems inefficient. We're adding this unnecessary level of indirection, right. We're giving validators the ability to set the fee of every transaction, and then we're doing it in this weird, clumsy bucketing way that, you know, is gonna require you to check large sets of integers, that they're not overlapping, and all this other stuff, which is just,

[29:00] Like, you know, why? Just set the fee per transaction. But how would you resolve contention if it came up, like, if everyone submitted the same kind of transaction all at once and there's contention for ledger space? In that situation, it's basically the normal, you know, like an English auction or a first-price auction, but just not at the consensus layer. It would be the nominator who's basically setting the fee rules for the block, as opposed to it happening at the consensus layer. And when I was saying that we don't necessarily want to do the Dutch auction, I wasn't saying we don't want to do it for any transactions. There's no reason you can't implement a Dutch auction as you're nominating a block, right. But for certain types of transactions that we think are very benign, we might actually want to mandate it, basically giving clients the opportunity to do very simple fee estimation.

[30:00] And then, if you're trying to do something that's, you know, deliberately profit-generating but not otherwise useful, then you have this more complicated fee situation to deal with, and you pay higher fees. Okay, but if you want to audit that, then it's going to be easier to segregate the transactions, right. I think I'm not opposed to that at all.
We can say, like, I could definitely see a thing where we say, at the consensus layer, you know, group zero, the transaction envelope group zero, is not going to allow orderbook things, and it's gonna have to be a Dutch auction, right. And then group one is, like, discretionary. Sure, yeah, I would be super comfortable with that. But for me, it just comes back to: do we want to do stuff like that? Like, if we want to keep the Dutch auction aspect of it, I think we should probably mandate it in some parts. And if we don't want to keep it, then we should just be honest, like you were saying, and not even have any pretense of doing it,

[31:00] And just say, okay, validators, you choose whatever you want. And [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) supports this. Basically, my argument here would be: just remove the directions for what you're supposed to do and say, do whatever you want. I mean, to mandate it, you have to actually include, whatever rule is used for partitioning has to be part of the protocol then. Exactly, at least the rule for what makes it into that one subset, not the rest of them. Which means that any change we want to make to this partitioning scheme is a protocol change, but only for that one partition. Well, it could be a parameter, too. It could be, like, you know, a bitmap of the operation types that are allowed in. Actually, the first version of this CAP was not operation type; it was based on which markets you would be, you know, touching. So it was much more granular,

[32:00] So that's not the type of thing you would want to, or can, encode as a parameter. But those would be the things that aren't in. So, basically, when you do a path payment, you are not guaranteed to get the Dutch auction treatment; if you do a simple payment, you're guaranteed to get the Dutch auction treatment. That's exactly the kind of thing I'm talking about. And then, of course, whether or not you get the Dutch auction, or something close to it, would depend on the particular validator, and that validator could decide it depends on the particular assets that you're touching. Do we think that changing the fee structure for these kinds of operations, meaning the non-super-benign, not very efficiently implementable operations, would make a big difference? I think, David, you were already questioning that, like: is this enough control to discourage those kinds of things, or

[33:00] Split things up appropriately? Is that right? Is it the fees that are the thing that matters? Is the segregation, in terms of the number of those operations that can enter the transaction set, the right thing to control? Like, what is the thing that matters? I mean, I, you know, some of this is going to be subjective, right. It's going to be like, validators are gonna have an idea that there's some stuff that is, you know, more valuable to the network than other stuff, right, and then people creating transactions are gonna have some idea of how important their transactions are, which is going to be reflected in the fee, or the max fee, that they're willing to bid for that transaction.

[34:00] And so I think we kind of like the idea that, you know, if my transaction's really important, I can pay a lot of money and get it included.
This CAP doesn't actually guarantee that, right, because I could bid a large amount for my transaction, but the validator could, you know, either not include it, or could include it in a subset with a low base fee. And then the block that contained my transaction would compare unfavorably to a block that has higher fees for other transactions, even though their max fees are lower. The way that, so, we actually almost never compare blocks. It's true, that's true. And that's actually already going to happen today. So, I don't want that. Okay, I suppose there's not really much guarantee that any validator will include your transaction. Like, you can imagine

[35:00] That, David, you know, you want to send Justin a payment or something, to close out a payment channel or something, and you're willing to pay, like, a million XLM to do it. Yeah, but maybe me and Nico and Tomer are the validators, and we're just like, yeah, maybe not, and then we just refuse to include your transaction. If there's some validator willing to do it, and that validator eventually wins consensus, then your transaction will see the light of day. Yeah, right, you need some offline agreement, basically, to really censor transactions, and that doesn't change with this CAP. I don't think this CAP can change that. It changes it a little. Well, no, I guess, not really,

[36:00] Not any amount that matters. I think we have not heard from Leigh, and not much from Tomer. Where do you guys kind of stand on all of this? It sounds like David has a much more simplified approach. I'm trying to understand, from a mechanical perspective, what this looks like. What are the protocol changes required, if any, for validators to start assigning random fees, or not random fees, to transactions? Yeah, I think about this, but I think, like, David has a point: if we're just pretending with this group-based approach, you know, we can implement that logic without it. So,

[37:00] I mean, the group thing is more like, yeah, I don't really care; like I said, it's a serialization format problem. I think the new point that David raised, which is interesting, is: should we basically hard-code this split that is in the proposal right now, as an optional thing? So that, like, this split, there's a presumption there that non-DEX operations, or, yeah, transactions, we're not going to need to

[38:00] Split further, you know. Oh, sure, or without going through consensus, sure, I guess. I guess, put another way, do we feel that, for certain kinds of transactions, people should be guaranteed the Dutch auction and the validator shouldn't have discretion? That's the question I was wondering, too, David, and I think Nico thinks the answer to that is yes. I am also inclined to think the answer to that is probably yes. I'm not sure, maybe I'm wrong that Nico thinks that, but I think he thinks that, based on his reaction when we were talking about removing the Dutch auction. I mean, I like that system, where you basically express, you know, a maximum bid, and then it's the job of validators to try to minimize this for you.
Okay, so this does not currently guarantee that, right, [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md).

[39:00] So, well, there is no, I guess, yeah, there's no guarantee either, even with the current system, right. Like, I could basically decide, as a validator, or all the validators could decide, that they're going to group all the expensive transactions in one ledger, and, you know, that ledger is the stupid ledger. Right, yeah, but at least you know that is a valid Dutch auction, right. At least you know there are, you know, 999 other operations that are willing to pay that base fee or whatever, right. But, I mean, or you could say, fine, you don't really have that guarantee, because, you know, the validators could hold the expensive operations. So I guess maybe what you're getting at is, we're effectively giving validators the

[40:00] Ability to just set a different fee for every transaction; then let's just do that and not wrap it up in some kind of auction thing. Yeah, I mean, I agree, but the alternative to this would be to say, okay, because of your point about, okay, I'm going to have a transaction where I put a fee of a million dollars, right. And, I mean, as a transaction submitter, I would feel very nervous about this, because now, if I have a leader that wants to play a joke on me, they can, and it would actually cost them nothing. I mean, only that one person is going to pay one million, right, and that's what [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) lets you do. So, right now, [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) lets you do that. The original version that I wrote was actually requiring that those groups would be of a minimum size, to actually avoid this type of situation,

[41:00] But I thought it was maybe not, so, basically, you'd be required to do this bidding system on at least some number of transactions. But yeah, I don't know, that would maybe mitigate this, and it kind of makes both versions work, in a way, because now you can have arbitrary sets, or arbitrary groups, where you do this bidding thing. I mean, I wonder if the right thing, I'm just kind of breaking it down, and this is not very nice in terms of how much of the ecosystem it impacts, but I almost wonder if you want to give people submitting transactions a choice. So, if I had some flags on my transaction, then I could say, hey, I prefer the validators play with the

[42:00] Priority of my transaction, delay it, or I prefer that validators play with the fee that I'm paying, or something, right. And so then, depending on what you select, you'll either pay a potentially very high fee, if validators want you to, or you'll get de-prioritized, if they don't like the fact that it's a path payment, or that it's touching some asset that we think is a spam asset. Yeah, I strongly oppose that idea. I think that we have a problem right now with the Dutch auction, where people don't understand it because it's so different from other networks, and adding other dimensions to prioritization, besides just fees, is just gonna confuse the hell out of people.
Well, what if there's just one bit, which says: I want to participate in the Dutch

[43:00] Auction, or, I want to pay this fee. And so then, like, yeah, why? I mean, I still understand why you would want people to, like, what the current contract with the network is, is that you put a fee bid and you may pay up to this number. That's good, that's the contract. It's fairly simple, I think, to understand. Maybe it's harder to understand when my transaction is going to be included, but at least, you know, the contract is simple. Yeah, I agree with Nico. I think that the current contract doesn't have to change, even if the underlying mechanism changed. Basically: this is the maximum amount I'm willing to pay; maybe, if it's an opportunity, I'm going to pay the whole thing, maybe, if it's for something else, you're going to pay less. The validators can change the logic without having wallets start exposing a weird interface. That's true, but the validators are gonna have to make a decision, right. They're gonna say,

[44:00] Like, either you're willing to pay a lot and I'm going to, you know, charge you a lot, or I'm gonna charge you the same as everybody else, right? So we're now giving the validators that discretion. And, you know, if we think that the Dutch auction is a good thing, and we think that certain transactions should be guaranteed the Dutch auction, I'm not sure I believe this, right, but it's plausible, like, I'm not taking a side, then maybe make it explicit in the transaction itself. As opposed to, I don't see how you can make it really explicit. I mean, you have to have a market for this to work. That's why I was saying, maybe it's more like ensuring that people are actually part

[45:00] Of a market. When they are doing this bidding, they expect some competition: like, you know, I want to be competing with at least five people or ten people. I don't know what the number is, but something like that. Can we go back a little bit to this, like, if we actually combine the groups and apply order, like, make them the same thing? I think this may create, it depends on how many groups we have, but if we want people to add more groups over time, doesn't that give too much kind of power

[46:00] To validators, now that they can control apply order? Right now, they don't really control that. Well, I mean, the rules for apply order could go through consensus, potentially, or some rules. I mean, I could definitely imagine a situation where, you know, in the long term, there are, like, four groups. You know, there's SPEEDEX, there's fast legacy operations, and then there's, like, smart contracts. Yeah, exactly. So, those groups, I understand. I don't understand how, inside those groups, fees would impact apply order. Okay, two things to interject here. One: there's actually already a CAP about arbitrary transaction orders, so we should probably all review it at some point and incorporate that information into this conversation, because we probably already argued about this, like,

[47:00] Three years ago, and since we didn't do that, for all the reasons, we should learn what we learned back then. It's CAP-14. It was written by Jeremy. I don't remember what it says either.
I just remembered it having the words "adversarial transaction ordering" in it, and I'm like: ah, yes. I think the word adversarial made this pretty unpopular. But the second thing I want to add is, I think, probably, to make this grouping thing sane, the fees and the groups would have to be on different, like, orthogonal axes. Otherwise, this is gonna just be a nightmare. I think, Nico, probably, okay, what's that? Sorry, I missed your comment. Like, you were saying, how would the fees impact the ordering within the groups? And, you know, what about the number of groups, and stuff like that? I mean, probably, fees and apply, like,

[48:00] Phases, should be separate kinds of grouping, or tagging, or whatever system. I know, that's what I'm saying. I don't like the idea of mixing the two concepts. Yeah, I don't think they should be mixed. I think that would be a mess, that would be really complicated, right. But maybe, as part of those changes, if we mess with the transaction set, we should make it so that it's easy to add more phases, I guess, even if we're not adding a phase with this change, because we don't have the need for that right now. I mean, I can see, given that I'm adding this generalized transaction set thing, maybe it should be easier to add phases to it. Now, I don't think it's that easy, actually, because it's using this index thing, and, yeah, maybe I should just ditch that, and then instead you actually have actual

[49:00] Vectors, and, yeah, I would like that more. So, we are getting close to the hour, and I guess my question is: it seems like there's more to discuss and more to think about. What's the best way to move the discussion forward? I mean, I think there are a few interesting questions here. There's, yeah, the auction system, like, what do we do with this? And then there's the actual structure of, like: what type of groups can exist or not exist outside of the protocol? So I think there's also something there that doesn't need to go through the protocol. I don't know what the current state of this is, but I think

[50:00] That there's going to be some configuration, some way of configuring Stellar Core to say which transactions we think should be prioritized, and so that doesn't need to go through the consensus layer. That's what's going to, you know, basically implement the discretion that validators have. But having some part of that kind of design sketched out, I think, would potentially inform the debate over what the consensus-layer transaction segregation mechanism should look like. There is, like, a super duper, ultra primitive version of that that already exists, super primitive. But I do think it'd be interesting to kind of think about what we could actually make expressible in a pretty simple way. Because, if there's nothing that's really useful that we can express, then that really lends itself towards saying, okay, these things are probably too complicated to express, they're not configurable, validators

[51:00] Won't be able to do it, etc. Whereas, if we can think of lots of useful things that you can express simply, then, actually, we should feel free to give validators a lot of power to make, you know, sane decisions, and they can configure it easily.
I think that matters a lot. I mean, like, the- yeah, I guess there are, like, different things here. Because my intent here was not necessarily to make it something that you would necessarily, you know, do as a config, right. It's not like a config file that people deploy. I thought it would be more of a- you make actual changes to Stellar Core, and then those things get, you know- I mean, I think a config file would be better, because- I mean, the point is, it's better, but it gets a lot more complicated. In terms of, I mean, really changing Stellar Core- compared to changing consensus, of course, it's easier, right, and we'd rather tell people, hey, you know, because of this thing, like, + +[52:00] Please compile, you know, this latest version of Stellar Core, which is still the same protocol version. But that's still pretty heavyweight. So it would be nice at least to rule out that a config file isn't sufficient. How do you feel about a hook, David, like what we do for history? Maybe, yeah, because then we could just, like, basically just vomit out the set of transactions that we have in memory to your script and say, like, hey, man, this is your problem, you figure it out. Yeah, or, you know, it could even be, if we care about efficiency, it could be like a plug-in or something, right. But, like, what does it look like? Maybe the plug-in just, like, assigns a small integer to each transaction, which is sort of its bucket number or something, and then there's a config file that assigns minimum fees, or relations of fees between buckets- I don't know, like, right now I'm just making some up. But to have at least one concrete design on + +[53:00] The table- not an implementation, just a design- I think would make it a lot easier to discuss [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) and the various possible changes to [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) in, like, its concrete context. I mean, like, the other thing that would happen- that sounds like it's getting into: let's see what type of complicated policy we would want to implement, and is it expressive enough, or is it too complicated for the things that we want to do? Well, like, I mean- like, which policy? Like, I had a previous example of a policy in there that I thought was way too complicated in the context of this guy. Because, for me, what I want the CAP to be is actually: what are the consensus changes? Like, the things that are really part of the protocol. In a way, I want to + +[54:00] Kind of set aside as much as possible, yeah, those types of more complicated changes that we can actually do later on, and this doesn't require a CAP. No, but I mean, I just, you know, I want to know, like, you know- like, for example, I don't know if this is true, but maybe what we want to do is we want to keep track of, sort of, for each asset, the fraction of failed, you know, path payments that involve that asset, and, you know,
And then, when it crosses a certain threshold, we want to, like, you know, de-prioritize or raise the cost of transactions involving that asset, or- you know, now I'm just making that up, right? But, like, let's have an actual concrete thing of, like, places where this would be useful and how it would work in Stellar Core, and once we have that, I think it'd be easier to say, like, well, do we really need [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md), or would it be simpler to just, like, set the price for each transaction, or to have, like, separate arrays? That would then also facilitate the phase thing, because it could be that the phases and the fees for spam should + +[55:00] Be solved by two totally separate mechanisms. Or it could be that, like, we should have one mechanism, and, like, [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) should basically, like, solve both problems at once- and I actually don't know right now, with, like, so few use cases, whether we should be doing this as, like, two separate mechanisms or a single one. The other thing I'm interested in here is just, like: what's our goal? To have less of these transactions, to have them pay more, to have a better distribution of transactions? What is the thing we're actually trying to achieve? I feel like we'd have an easier time agreeing on the mechanism if we all agreed on the thing that we wanted to happen. I don't think we all necessarily have the same picture of what we want to happen. I'm not sure if I even have a picture of what I want to happen. I don't like what the current situation is, but what is the good situation? I don't know. And that's what I mean by the concrete thing: like, let's just say, like, + +[56:00] Here's what Stellar Core could do, here's a couple of scenarios in which the network's not going to behave well, and here's how it would be solved with this, plus some [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md)-like thing. And then we can even- you want more examples, like the one right now in the CAP, the we-can-do-groups-based-on-DEX-activity one? So you want more of that? Yeah, and I want- but I also want to maybe know, like, how this would be implemented in Stellar Core. Like again, like, config file change, C++ change, plug-in, like, you know. Because is it realistic? Like, how often can we get people to, like, compile new versions? We get a new version every month, right? So that's why I'm like, do we need people to go and mess with that more often than that? I don't know, in a super uncoordinated way, I don't know. It could be very + +[57:00] Interesting. Do you know, do people actually upgrade every month, or just for the new protocols? I mean, like- yeah, at most, it's like, people, yeah, like, upgrade after, like, three months or something. Yeah, so, like, if people are only upgrading, like, for the new version of the protocol, then there's no benefit to removing this from the consensus layer- or, in order to remove it, there would be benefit only if it could be, like, done by configuration file, as opposed to, like, a recompilation, right. But if you're saying, like, the protocol upgrades every six months and people upgrade every three months, like, okay, then maybe there's some benefit. I mean, like, so validators do upgrade, like, they keep their nodes up to date. I'm not talking in general. Yeah, like, people- the bulk of people takes, like, two to three months to upgrade.
There's pretty big variation. Some people upgrade insanely fast. I mean, the COINQVEST guys have occasionally updated + +[58:00] Before us, I think, but not everybody is so excessively diligent. Yeah, all right. All right, okay, I think we're out of time for today, but I feel like there are some open questions and suggestions for things to sort of work on next, and I feel like, ideally, we start to bring those to the mailing list and plan to reconvene with some new thoughts, hopefully the next meeting in two weeks. So good, all right, awesome, thanks, everybody. See you soon. Thank you, thanks for watching out there. + +
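One way to picture the per-asset policy floated around the [54:00] mark, as a sketch. The threshold, the bucket names, and the `bucketFor` helper are all invented for illustration; nothing like this exists in Stellar Core:

```typescript
// Hypothetical sketch of the "track failed path payments per asset" policy:
// transactions touching chronically failing assets go into a de-prioritized
// (or pricier) bucket. All names and numbers here are made up.
interface AssetStats {
  attempts: number;
  failures: number;
}

function bucketFor(
  assets: string[],               // assets a transaction touches
  stats: Map<string, AssetStats>,
  failureThreshold = 0.8,         // arbitrary placeholder
): "default" | "deprioritized" {
  const failing = assets.some((a) => {
    const s = stats.get(a);
    return s !== undefined && s.attempts > 0 && s.failures / s.attempts > failureThreshold;
  });
  return failing ? "deprioritized" : "default";
}
```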
diff --git a/meetings/2022-02-17.mdx b/meetings/2022-02-17.mdx new file mode 100644 index 0000000000..359716d4ff --- /dev/null +++ b/meetings/2022-02-17.mdx @@ -0,0 +1,191 @@ +--- +title: "SPEEDEX Lane Policy and Parameterization" +description: "This discussion revisited CAP-42 with its revised lane-based fee semantics and introduced CAP-44, which defines how validators configure SPEEDEX trading. The conversation focused on validator discretion, fee transparency, downstream tooling impacts, and how SPEEDEX asset lists and parameters should be governed without harming usability or fairness." +authors: + - david-mazieres + - geoff-ramseyer + - jonathan-jove + - justin-rice + - nicolas-barry + - siddharth-suresh +tags: + - legacy + - CAP-21 + - CAP-40 + - CAP-42 + - CAP-44 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +The session opened by confirming CAP-21 (generalized transaction preconditions) and CAP-40 (new shared-signer type) had reached final comment, clearing the way for payment channels, bridges, and more advanced transaction sharing ahead of Protocol 19. With those nearly finalized, the group turned its attention to fee lanes and high-throughput exchange design. + +Most of the discussion centered on the revised CAP-42 and the first public review of CAP-44. Participants explored how validator-controlled fee policies, execution phases, and SPEEDEX configuration interact, with particular focus on fairness, operator observability, and the burden placed on wallets, Horizon, and other downstream systems. + +### Key Topics + +- CAP-42 revisions and lane-based fee policy + - Introduction of execution phases to prepare for SPEEDEX and future engines + - Explicit acknowledgment that validators control fee regimes per lane + - Renaming “surge pricing” behavior as discounts to better reflect user expectations + - Trade-offs between Dutch auction guarantees and validator discretion +- Validator behavior, trust, and observability + - Reliance on validator reputation and social contracts rather than hard guarantees + - Need for better telemetry and audit tooling to detect unfair fee behavior + - Discussion of exposing lane and fee data more clearly via Horizon +- Downstream ecosystem impact + - Wallets may need more active fee management and retry strategies + - Potential role for Horizon in automated fee bumping or submission retries + - Education required as users can no longer safely overbid by large margins +- CAP-21 and CAP-40 status update + - Both moved into final comment period targeting Protocol 19 + - Critical enablers for payment channels, interoperability, and shared signing workflows +- Introduction to CAP-44 (SPEEDEX configuration) + - Validators configure SPEEDEX parameters and eligible asset sets + - Motivation: batch trading for scalability, fairness, and reduced arbitrage spam + - Separation of SPEEDEX into multiple smaller CAPs to avoid monolithic proposals +- SPEEDEX asset selection and governance questions + - Concerns about validators “picking winners and losers” + - Exploration of alternatives: issuer consent, reserve-based admission, multiple SPEEDEX pools + - Computational limits (notably linear programming) driving caps on asset set size + - Tension between flexibility, fairness, and operational simplicity + +### Outcomes + +- CAP-21 and CAP-40 were approved and entered Final Comment Period + +### Resources + +- [CAP-0021: Generalized transaction preconditions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) +- [CAP-0040: New 
signer type for shared transaction sets](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) +- [CAP-0042: Multi-part transaction sets](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) +- [CAP-0044: SPEEDEX configuration](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0044.md) + +
+ Video Transcript + +[00:00] I think that's our cue. Hello everyone, and welcome to the Stellar Open Protocol Discussion. As always in these meetings, we discuss Core Advancement Proposals, aka CAPs: technical specs that suggest changes to the Stellar protocol in order to allow the protocol to continue to drive forward, make progress, and evolve to meet the needs of the ecosystem. These meetings- if you're watching, this is probably pretty obvious- are live streamed so that you can follow along. I do want to note this is a very technical discussion, right? So if you're watching, you probably want to take a look at the CAPs that are linked to in the show descriptions, as are the previous conversations about those CAPs on the Stellar dev mailing list. I'm not going to identify all the participants. They're also listed there, so you can look them up. They're qualified. So today, what are we doing today? We will again discuss [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md), multi-part transaction sets, which was what we + +[01:00] Discussed last time, but it's been significantly revised since that discussion. We are also going to have an initial discussion about [CAP-44](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0044.md), SPEEDEX configuration, and we'll get into what those proposals entail in a minute, but first I want to give a quick word about the life cycle of a CAP- and there's a purpose for me saying all this that I'll get to. So this meeting is really just part of the process of the creation and implementation of a CAP. Open discussion about CAPs happens on the Stellar dev mailing list. There's a link to that in the event description as well. So if you want to join the current discussions or future discussions, sign up for that mailing list. CAPs are drafted and sort of iterated on based on the async feedback and suggestions that come in via that list, before they end up here, being discussed in this meeting. So in this meeting we go over very specific questions, and the goal is to get a CAP ready to move on. If a CAP is ready, it may be put up for a vote before the CAP committee, and they decide on whether or not to accept it. So after a CAP committee vote, by the way, the CAP enters into a one-week final + +[02:00] Comment period, and that gives everyone a chance to sort of raise questions again on the Stellar dev mailing list. If a CAP makes it through a Final Comment Period, it's implemented in a major Stellar Core release. Before that Stellar Core release actually hits the network, validators have to vote to upgrade the network to accept that release, and so ultimately any sort of proposal that makes it into implementation in a Stellar Core release is accepted by the network through that governance mechanism. So part of the reason I'm bringing all that up today is that, before we go on to the main discussion about CAPs 42 and 44, we're going to revisit a couple of CAPs: [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md), which generalizes transaction preconditions, and [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md), which adds a new signer type that multiple parties can use to more effectively share a transaction set for signing. So these are two CAPs that we sort of got pretty much across the finish line.
They basically include changes that are helpful for people that are looking to develop innovative solutions on Stellar, specifically payment channels and bridges to other networks. The goal of these is to sort of unlock the potential + +[03:00] For greater interoperability. So again, these CAPs, they already went through the wringer. They're currently in Awaiting Decision- so that's their official status, Awaiting Decision. But first things first: I think the CAP committee is ready to approve both CAPs, and so I want to check in with the CAP committee and see if that's true. Now, the CAP committee is Jed and Nico and David, and Jed, who is not here, I checked with before, and he is ready to approve. Nico and David, are you also ready to approve, or is there more to talk about? For CAP-21, David, you approve, okay? Yeah, it's the same for me. Like, there was a change yesterday or the day before on [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md), but it's, yeah, it's a minor change, so we're good. All right, approved. So after this meeting I'm going to change the status of those CAPs to Final Comment Period pending approval, and again, that gives everyone a one-week window to, like, raise any final questions or concerns. + +[04:00] I guess if a legitimate concern comes up during that time, we can send it back to draft mode. But if there's no concerns- legitimate concerns- that arise during that time, those CAPs will be approved, and that means they're going to be ready to move on to implementation for inclusion in the next protocol release, which is Protocol 19. So, as of right now, shortly after this meeting, [CAP-21](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0021.md) and [CAP-40](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) officially move into Final Comment Period. All right, now on to the real meat of the matter. We're going to talk about these two CAPs, and the first one that we're going to talk about is [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md): multi-part transaction sets. This CAP gives validators control over which fee policy to use for individual transactions included in a ledger. We discussed it two weeks ago, and there were several suggestions about how to improve it, and, based on those suggestions, it was revised. And so I think, to start with, let's just get a quick overview of the changes to [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md). Nico, you want to take that? + +[05:00] Yeah and yeah. So what I ended up doing is, basically, I added the notion of execution phase- that, in the short term, we are not going to have a new phase; it's more in preparation for things like SPEEDEX, where we expect to have that being done as a separate phase, isolated from the existing transaction subsystem. And then, within a phase, what I did is that, yeah, after our conversation last time, it was clear that we needed to maybe clarify better, or make it more explicit, I guess, that there was actually full control of the fee regime given to validators.
So what I did was I added a new + +[06:00] Tag that allows validators to basically use the actual fee bid as the fee being charged for certain transactions, as well as having the possibility of a discounted rate- when it comes to, for example- I mean, actually, that's what we do today: we actually give large discounts to transactions when we're not in surge pricing. So that's what I did. I also added some clarifications on how this makes- and why this makes sense. I think there were more questions, actually- and that's maybe where we can spend some more time today- around, like: are we basically defeating the existing guidance that we give people + +[07:00] When we say, bid whatever you feel comfortable with, and then you're actually unlikely to pay what you're bidding if you're overbidding- and I think this is a fair question. So, on that, yeah- I don't know if David or John want to weigh in. I mean, you know, I like this new version. I think it's a lot cleaner. I think, possibly, in line with what you're saying, if I have one nit to pick: in the backwards compatibility and protocol upgrade transition section, we state that this CAP does not change transaction semantics. I think maybe we should just qualify this- or, you know, like, it changes the fee, but it's within the originally permitted semantics, or something. + +[08:00] But that's pretty, that's pretty nitpicky. Yeah, I mean, I think, yeah, I can fix that, definitely. Like, I think the interesting things that were raised both by John, who is on the call, and by Leigh, who is not here today, are actually good questions around, like: today, you can basically overbid by a factor of, like, a million, let's say, and you're actually very unlikely to pay that- I mean, you're exposing yourself to that, but then a bunch of other people would have to pay that type of fee- whereas with this proposal, depending on which validator you run into, you may actually pay that fee. So, like, for me, the rationale for that is that we can actually kind of + +[09:00] - I think I put that in my notes- we can actually rely on a contract between validators. You know, the responsibility of a validator is to actually have lower fees, and then we can actually use that, in a way, as an expectation for validators- just like today, when you run a validator, you have to have an archive; like, you basically have a contract with other validators, and at any time, other validators can basically decide to remove that faulty or not fully compliant validator from their quorum set. And then we end up with, I think, a system that, yeah, relies more on this contract- it's not necessarily a technical contract, it's + +[10:00] Kind of a- in order to be aligned with the values of the network, you know, you shouldn't be, like, isolating, for example, transactions just to mess with people. Just a question: if it's more of, like, a social contract, or a set of expectations that validators follow, is there a method for them to know if other validators are living up to that? Yeah, so that's actually a good question. So, like, we actually have a similar contract today, with, like, validators should not be excluding certain transactions from their transaction set.
The only way you can discover that today is by looking at whether there are patterns where some transactions- let's say, for certain assets or things like that- are always excluded by some validators. You can actually go through history- this is all public- and, you know, + +[11:00] You can basically kind of audit the behavior of the validators, and I think this is a similar problem here, where you want to have the tools for auditing validators when it comes to this new power that we are giving them. So the data will be in history. I think what we have right now is that in Horizon, for example, we don't actually expose much of the SCP information, so we don't know what was actually happening during consensus runs- for example, we don't know if some people are never aligned with other people. So I think this is the type of work that can be done either, yeah, by the SDF or by other people in the community, to kind of raise the bar, just like we have + +[12:00] Raised the bar when it comes to what it means to run a validator, with, you know, projects like StellarBeat, right, that are showing when validators are failing or things like that. So, I mean, what you've just mentioned is sort of one solution- like, yes, we need, you know, some kind of validator conventions, and ultimately there needs to be some kind of companion document- you know, this might not be a CAP, but it discusses, like, how you configure your Stellar Core to express these policies- and, like, what we make easy and hard to do, like, in the kind of default Stellar Core release, is obviously gonna have a big impact on this. But I think there's another side of this, which is, you know, we don't want people to have to trust validators, so we should maybe be promoting fee bump transactions a bit more heavily, right. I mean, certainly, you know, one thing that, like, wallets could do is they could sort of, by + +[13:00] Default, like, you know, whenever you sign a transaction, it would also sign, like, a couple of fee bump transactions that it would kind of, like, resubmit- you know, like, more expensive versions, if the first one didn't get through, or things like that. So I think, in terms of backwards compatibility, we should make clear that there may be some expectations on the ecosystem, the sort of wallet side. Yep, yeah. I'm sorry, Justin, go ahead. I was just gonna say, because on some level, there needs to be education and tooling to understand how to manage dynamic fees more actively, because right now, as Nico said, you can just set your highest fee, and you know what to expect from the network, but you won't necessarily- is that- so what you're saying, David, is that we would need documentation and potentially tooling in order to educate people to the expectation that, like, wallets will need to more actively manage dynamic fees. + +[14:00] Or maybe we need a feature, an optional feature, for Horizon, that people would enable on their private Horizons- maybe on the public ones- where you submit three versions of a transaction, and it sort of, like, keeps submitting, like, the more expensive one if the previous one hasn't cleared in some period of time, or something, right? You know, I can't design the feature on the fly, but it seems like something we might want to think about.
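As a rough illustration of the wallet-side idea- signing a couple of increasingly expensive fee bumps up front, so more expensive versions can be resubmitted later without another signing ceremony- here is a sketch using the JavaScript stellar-sdk. The `buildFeeBumpLadder` helper and the multiplier schedule are invented for this example, not an existing API:

```typescript
import { Keypair, Networks, Transaction, TransactionBuilder } from "stellar-sdk";

// Sketch: pre-sign fee-bump wrappers at increasing fees for an already-signed
// inner transaction. The multipliers are arbitrary placeholders.
function buildFeeBumpLadder(
  inner: Transaction,
  feeSource: Keypair,
  baseFeeStroops: number,
  multipliers: number[] = [2, 5],
) {
  return multipliers.map((m) => {
    const bump = TransactionBuilder.buildFeeBumpTransaction(
      feeSource,
      String(baseFeeStroops * m), // max per-operation fee, in stroops
      inner,
      Networks.PUBLIC,
    );
    bump.sign(feeSource); // signed now, so no wallet interaction is needed later
    return bump;
  });
}
```

A submitter- whether the wallet itself or a Horizon-side service like the one being described- could then try the inner transaction first and walk up the ladder whenever the previous attempt hasn't made it into a ledger after some timeout.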
And if people want to take their wallets offline, or not be sort of constantly monitoring the network, maybe this should be part of Horizon. Maybe this is an incentive for organizations to run their own Horizon, because they can, you know, control this better. I was asked to say the following: the CAP is built on reputation; the concern that a reputable validator is going to go rogue and build an alternative transaction set nomination rule set is a bit far-fetched, and the ecosystem can build tools that monitor validators for behaving sensibly. + +[15:00] Sorry, but there's a big space between rogue and, like, something I might not agree with, right. So I think, like, the point is, there could be legitimate disagreement about this, and we want users to be in control- or, you know, they might, you know, rather not have their transaction be executed if they're gonna get charged way more than somebody else. I think, as far as tooling goes, one thing that StellarBeat could do, that would be really cool in this context, would be, like: hey, what are the, you know, every-10th-percentile- 10, 20, 30, etc.- fees that this validator is nominating? If validators are being, like, pretty fair, you would expect every validator to nominate basically the same fee percentiles, more or less, and that would give a lot of people evidence about, like, oh, you know, this guy's being a big jerk + +[16:00] And really just trying to get people to pay, whereas other people aren't doing that. But I agree with you, David, that, you know, that doesn't change the fact that, you know, your validator might say, like, hey, I think this type of transaction pays a high fee and this one pays a low fee, and I think the opposite- they're equal distributions, and so, like, our stats look the same, but we're treating people differently. Maybe all the validators agree, but it just turns out that there's some small number of, like, legitimate transactions that are hard to separate from spam, right, and so you need, I think, to be able to just not participate. We need an opt-out. People shouldn't be forced into participating in the Dutch auction, right. They should be able to, you know, control their- you know- maximum fee, to ensure that they're not paying more than, like, twice as much as other people, and that's something that's, it's doable. It just requires a little bit of support on the, you know, sort of platform side of things. For me, kind of the, like- I've been thinking about this whole thing since we spoke about it + +[17:00] Last- was that last Thursday, two Thursdays ago, whenever it was- and I've kind of convinced myself that, in the long run- like, let's say that we did this in x months, and then you look at x plus 12 months in the future- like, probably everything is fine. Everybody adapts to the new world. You know, maybe some new tooling develops, like David is talking about. Everybody switches to kind of this lower-fee mentality, where you don't just bid, like, 10x, 100x, whatever, of what you're willing to pay- you bid what you actually want to pay at the present. But, like, looking right now- you know, I was looking at the dashboard for the last couple minutes, like, every minute- and the 50th, 60th, 70th percentile fees have been sitting around 20,000 stroops pretty much non-stop in that period. Sometimes it's been the 60th, 70th, 80th, but 20,000 stroops, and that's, you know, about one third of the total traffic.
So there are a lot of people who are bidding way above the fees that are actually getting accepted. In the short run, if + +[18:00] Those people aren't paying attention, those people are going to be pretty sad. I think- yeah, there are just 100 million stroop bids. Yeah, so, I mean, I guess that's 10 lumens or whatever- is that 250? Yeah, it's not insane. I have to say, I've literally said to many people: just do a hundred thousand stroops, you'll be fine, don't worry about it, just set 100, and, you know, I'll try to tell those same people if that changes. That made sense, right? So this change- I think what you're saying- yes, yeah, this short-term-pain, long-term-gain thing that you're pointing out, John, may well be true. I mean, like, I think the difference is that, like, yeah, if, for example, the fees of the network are, like, you know, around the minimum- like, when you're not in surge pricing, right- that's kind of where you are. Like, if you paid even + +[19:00] 100 times more than that, like, that still gives you some advantage compared to other people- like, you're still doing some level of bidding. Like, I think that's the difference with this CAP: you would not be bidding, like, yeah, a million times more; like, you would be bidding only, you know, a thousand times more. I mean, that's, I think, the shift: you have to be maybe closer to what you want to pay. Unless, like, in some situations- like the people putting 10 lumens as a fee- I mean, this is what we, I think, discussed in the past: those are the type of numbers I would expect if you do, like, some trades that actually are going to be valued at more than that. + +[20:00] And I think those, over the long term, they will continue to actually be bidding that high, and that makes sense, but they will actually pay that. That's the difference. Well, those fees should come down quite a lot, actually, and that's one of the things that made me think, in the long run, this would be better. Like, right now, imagine that you're racing for an arbitrage opportunity. Let's say the arbitrage opportunity, using the numbers that we were just using, is worth, you know, 11 XLM. You're like, I'm willing to bid 10 XLM; I'm going to try to get, you know, 1 XLM of profit. In the worst case, maybe you're even willing to bid 11, but let's just say 10 for the sake of argument. Well, right now, it's like: I'm going to bid 10, but I'm probably going to pay 100 stroops, a thousand stroops, whatever, based on, like, what normally happens during surge pricing. And so you've got, let's say, like, 500 people or something trying to do this. One of them wins. The other 499 transactions go in the garbage. But in the real world, where everybody is actually bidding for this, you know, 10 XLM thing, they're going to pay 10. Meaning, with this proposal- + +[21:00] When I say in the real world- everybody would actually lose money in expectation, so they'll all have to drop their fees by a factor of the competition. So fees will actually- like, their peak fees would come down by a factor of 500 in this example. So that's pretty cool. That gives other opportunities a much better chance to be competitive. So I think, long term, there could be benefits beyond just, like, you know, people bidding closer to reality.
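For the record, the arithmetic behind that factor-of-500 claim, using the illustrative numbers from the discussion and simplifying to a single winner where every included transaction pays its bid:

```typescript
// Worked example: 500 racers compete for an arbitrage worth 11 XLM,
// and under pay-your-bid every included transaction pays its bid.
const value = 11;   // XLM the opportunity is worth
const racers = 500; // competing transactions; exactly one wins

// Bidding 10 XLM loses money in expectation: you win 1/500 of the time.
const evAtTen = value / racers - 10; // = 0.022 - 10, about -9.98 XLM per attempt

// So bids shrink until expected profit is roughly zero, i.e. value/racers:
const equilibriumBid = value / racers; // 0.022 XLM = 220,000 stroops
// That is the "peak fees come down by a factor of 500" in the transcript.
```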
But also, just, like, changing the discrepancy between, like, I'm trying to send a payment to my friend, and I'm trying to take advantage of some arbitrage opportunity. Yeah, I got one more question from Tomer here. Actually, it's two. How do we envision validators controlling this? Will we give them configuration options in Stellar Core? So, yeah, I thought a little bit about this. All right, like, last time, there were actually questions around that, like: are there, like, certain configs? I think it depends on the type of strategies we're going to implement. Some of + +[22:00] The things where I can see some configs being useful are: if we start to have, like, this type of surge pricing, in a way, that is doing something maybe over historical data- like, when you look at the last few ledgers, instead of just looking at the current ledger- and then, as soon as you say the last few ledgers, well, then how many, right? So that would be an example where you have a tunable. There's another tunable I can think of, that is related to- actually, the first example I had in the CAP, that was: can we limit the number of operations that are potentially touching the DEX, right? So, like, that number is an example of a tunable also, for that policy. + +[23:00] But of course, you know, people can always go around any of those things- like, they can write custom code if they want. So that's, that's for that. But yeah, for the ones that I think we will be implementing, like, they likely will have, yes, some tunables in them, to potentially even disable them. Companion document: right, that would be more in that box of- yeah. So is the general consensus at this point that people feel- I feel like I literally just interrupted Justin, who was about to say the same thing. But do people generally feel good about this now, feel bad about this? I feel a lot better than I felt when we spoke about this two weeks ago. I feel like we're in a place where, like, + +[24:00] I think we could get people behind this, and it'd be a bit of a mind shift, but people would feel good about it. I don't know if everybody else feels the same way. Yeah, I think this has been better- much better than the last draft. I think this is a tool, and I think there's, understandably, some anxiety over, like, how is the tool going to be used? But I think the worst-case scenario is not terrible: that, like, we just get rid of the Dutch auction. And the most likely scenario is that, you know, Stellar Core has some parameters, and, like, the thing, you know, it's done in such a way that, you know, simple payments are very unlikely to get this treatment, because that would require changing the C++, and that, yes, we understand that certain assets, and an overloaded DEX, may end up getting this treatment. Yeah, exactly- like, the worst-case scenario is that we end up with what basically everybody else in the space is doing, and what we used to have before we went to the Dutch auction, which was before the + +[25:00] Fee bump transaction. And so I think that there's some- I still like the Dutch auction, but I think it would have been harder to make the case for the Dutch auction if we already had the fee bump. And so, you know, now we've got both options.
Yes, I think- like, I don't know if people- so, I think that would actually be part of how to explain to people, like, the shift here. I think that by renaming what we do to discounts, it actually helps quite a bit to make people understand what is actually happening, in terms of, like- you know, when you think of a discount: I'm potentially going to give you a discount; you go to a store, you actually expect to more or less pay what is, you know, displayed, before you start to haggle. And I think that's kind of the- you know, I think it's more similar to this. Via Slack, Tomer is asking questions about downstream systems. So how do we expect downstream systems to understand + +[26:00] The fees collected? And then he also said: Dutch auctions are already an oddity in the space, so this only makes fees more opaque- and then, again, raises a concern: we need to figure out how to explain this to downstream systems. So I know that this is a- well, I think that's a valid question. Downstream systems: how do we expect to explain that we've removed the oddity, at least some fraction of the time? Right, I think there was a question around, like, being able to see which groups and all that from Horizon, from Leigh on the mailing list. I mean, the data will be there- like, it's actually part of the consensus value. So, yeah, I think we'll probably have to think about how to maybe expose this a little better from the main Horizon API. Maybe the fee stats API also is, like, going to give a better + +[27:00] Idea of, like, what's going on. And the current one is actually also kind of strange, because, if you look at it as a developer, if I hit that API that gives me stats about the last five ledgers, it's actually not that great in terms of experience, because you're looking at, like- a lot of people are like: should I use the tenth percentile, or which percentile? And actually, you're not even guaranteed anything by using this, because it's only five ledgers, and, you know, past performance doesn't predict the future. So, yeah, I think a lot of developers just ignore it. They've learned: why do I need to check fee stats if I can just set my fee super high and only be charged the lowest? And so I guess this comes back, in part, to that question, right: like, if suddenly, yeah, displaying information about fees in order to make an informed decision is more important- + +[28:00] I think- so, it will be, actually- you can still have- like, the approach can still be done very much in isolation, because, you know, similar to, you know, what we have today, right, where people bid, like, some high number, and then that's kind of it- like, now they will have to bid lower than that, if they are not comfortable with that higher number, and then retry, and then do, maybe, like- like, depending on the app, right, it depends. Like, for many applications, bidding, like, a tenth of a cent in fees is actually fine, right, and that's kind of the end of the story. If, like, you really want that transaction to make it, but you don't want to necessarily expose yourself to variations, + +[29:00] I think a fine strategy is just to have, like, a timeout and a multiplier on top, or something like that. Right, like, and it's not complicated.
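For what the timeout-and-multiplier strategy might look like in practice, here is a sketch against Horizon's fee stats endpoint using the JavaScript stellar-sdk. The starting-bid heuristic, the multiplier, and the cap are made up for illustration:

```typescript
import { Server } from "stellar-sdk";

const server = new Server("https://horizon.stellar.org");

// Sketch: pick a starting bid from recent fee stats, and escalate by a fixed
// multiplier each time a submission times out. The percentile choice, the
// multiplier, and the ceiling below are arbitrary placeholders.
async function nextFeeBid(previousBidStroops?: number): Promise<number> {
  if (previousBidStroops !== undefined) {
    // Previous attempt timed out: multiply, up to a ceiling we set ourselves.
    return Math.min(previousBidStroops * 2, 1_000_000);
  }
  const stats = await server.feeStats();
  // Start a bit above the recent median of fees actually charged.
  return Math.ceil(Number(stats.fee_charged.p50) * 1.1);
}
```

The useful property is that the bid only ever reflects what the client is genuinely willing to pay at that moment, which is exactly the expectation the discount framing sets.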
Okay, I mean, I guess if I had one reservation about this- maybe I should just voice this now; I'm not sure this is a problem- but, in the nomination value comparison, I'm not 100% convinced that the second item in the four-tuple, the second most significant item, should be the total fees that the transaction set will collect. An alternative- which, I don't know if it's better, but the point is, I don't know- an alternative would be to say it's the total pledged fees, even if the total collected is less, because some transactions ended up in the discount bucket. Now, I understand there's definitely a good argument why total fees, of course, + +[30:00] Is better- otherwise, maybe validators could, like, game the system or do other weird things. However, there's just one scenario that maybe I'm concerned about. Imagine a situation where we have this thing that we think is not very economically beneficial, and it's spam. So we say, okay, we're going to put that in the non-discount bucket, and everything else goes in the discount bucket. And now you have these transactions that are very important, that are willing to pay a lot of money, but, because they're in the discount bucket- two kind of maxed-out nomination values, that are both completely full, so they have the same number of operations, end up favoring the one that collects more fees, even though more fees were pledged to the other one. It's just that the other people are getting a discount that maybe they don't even want. Yeah, I mean- yeah, I don't think that using the total fee bid is necessarily a problem. + +[31:00] Like, we just need a way to compare, right- like, the quality. So I think that, you know, it actually is aligned with the, you know, the intent. Yeah, so I think, yeah, like, the bid is fine, probably. So, are there other questions to raise here, or points to discuss? I feel like there are still issues that maybe already exist on the mailing list- is that correct? Or, actually, I guess I'll just say it this way: I feel that this is not quite ready to move into a vote or anything; that there still may be some time for marination, just to think about a few of these issues, including maybe thinking about the downstream implications. Just- yeah, I think I want to maybe add a little bit of detail on, like, the type of expectations for the downstream systems, and then + +[32:00] We can actually move forward with this one. I mean, here's another thing we could do, that I don't necessarily think is a good idea, but just so that it's been on the table and we've decided it's not a good thing to do. So, unfortunately, there's no, like, flags, you know, section for the fee bump transaction, but we could kind of add a new type of fee bump transaction where there's a flags field, and sort of, you know, one of these flags could say, like, basically: this is very important; like, I'm willing to pay my full bid; I'd rather not have a discount if it, like, you know, risks my transaction having, like, less chance of getting in. So then, at least, somebody whose transaction is very important, they say, like: hey, don't exclude me just because you're giving me a discount- I don't even want that discount. Yeah, I don't
think we would- of all the strategies we talked about so far, like, we would basically have, like, those groups, right, and then within those + +[33:00] Groups, you may or may not get a discount. But even if you do get the discount, you have to be the highest bidder. So I think, if you bid more than everybody else- like, you know what I'm saying- you'll get in, right? Not necessarily, because you might not be in the winning nomination value, right. I'm concerned- what I mean: a validator, right, that's going to craft a set, right, with all those groups- if you bid more than everybody else, you will be included. Not if you're- yeah, sorry, you'll be included in the nomination value, but it may get beaten out by another nomination value. Oh, in practice we normally only have, like, one nomination value, right, or two. Yeah, that's very small, yeah- but now we're starting to, you know, increase the amount of trust in- you know, that's not what SCP is designed to guarantee, right. So, yeah, I mean, at this point, what you're saying- like, well, the other value, right, that's from a different validator, would also not + +[34:00] Include that transaction that is the highest bidder, right? So that's why I'm imagining- I think this is a different problem- no, that's the same problem. The problem is that somebody keeps nominating a block that has more spam transactions and therefore collects higher fees, and that value kind of ends up beating out a value with more useful transactions. But I think this is more of a, like- why would the other validator- if there are more spam values, right, like, this high-fee transaction would be nominated by this other validator, unless they actually decided that they would not. But, I mean, like, SCP is Byzantine fault tolerant, right? We're supposed to assume that, like, some fraction of validators could be misbehaving. Yeah, of course, yeah. And in this case- and so I want to make sure that doesn't, like, severely impact the, + +[35:00] You know, what we consider the good transactions. Can I clarify here: is this not solved by switching the second criterion to the total fee bid? Oh, sorry- if we're just gonna do that, then that's fine. Yeah, that's right, that's what I said earlier. Oh, okay, sorry, I thought you were gonna consider it, but you weren't- so that's why I was saying: here's another thing that you could do. Yeah, okay, sure. I could still imagine the utility of, like, a no-seriously-I-really-mean-to-pay-this-bid flag. I just wouldn't support that today. But if somebody were to come around and be like, no, seriously, I have a real reason why I really want to pay these fee bids and waste my money- maybe there is a real reason for that; I don't know it yet- but, like, that wouldn't be a hard change to make, like what you were describing about adding a flags thing to the fee bump transaction, right. Okay, so I guess that's another thing: switching that second value to be the bid instead of what it is now. Okay, + +[36:00] Yeah, okay, cool. I feel like that's in progress, and now I think it's time to move on to SPEEDEX. So the next CAP for today's discussion is [CAP-44](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0044.md): SPEEDEX configuration. SPEEDEX is a new design for a fully on-chain decentralized exchange that can scale to an arbitrarily high transaction throughput. Implementing it is something we, SDF, mentioned in our 2022 roadmap.
It's something that's important to sort of the future scalability of Stellar. It's going to take a few CAPs to implement SPEEDEX, and this first one is really just about setting up validators so they can configure SPEEDEX. But rather than me trying to explain that, I will pass it, I guess, to John, or maybe to Geoff, who wants to sort of give an overview of what this CAP does. I'll pass it to John, and John, if you want to pass it to Geoff, up to you. I'm gonna pass it straight to Geoff. And what I was thinking is, Geoff could open with just, like, a quick pitch on + +[37:00] What SPEEDEX is, why SPEEDEX, and then maybe I can go and kind of give a quick pitch on, like, why we're breaking up the CAPs into a bunch of different things- like, why we're going section by section- and kind of what this one is about. And Geoff wrote all the drafts of all of this stuff in the summer, and we've just been cleaning it up and kind of, like, trying to make it as simple as possible- but all of these ideas are really Geoff's ideas. So, thanks. Yeah, so SPEEDEX is a design for trading- settling a bunch of trades in batches instead of serially- and the reason for this is, on the one hand, if you settle trades in batches, you can parallelize the operation of the exchange, so you can scale to many transactions per second; and also, by trading in batches, the market becomes more fair, and you get better liquidity between assets that aren't traded very much, and you cut down on a lot of the common front-running attacks, or the arbitrage opportunities, that + +[38:00] We've been talking about, that clog up the network. As for this particular CAP: SPEEDEX needs a certain amount of configuration data. In particular, the big things are, one, control parameters on how you actually run the batch computation, and two, a way of choosing which assets are traded in the batch and which ones aren't. And so what this CAP does is take a very- like, the simplest approach towards deciding which assets are in the batch: namely, there's some set list, and then the validators can upgrade that with some kind of upgrade protocol. But you can imagine, in the future, doing some other way of choosing which assets are traded in the batch. And it's not quite clear exactly which kinds of configuration data we need in the CAP at this point- what we actually need will probably depend on what we finalize for the other SPEEDEX-related CAPs- but we'll at least need the data that we have here. + +[39:00] And just to give a little information about, like, why we're breaking this down into a bunch of small pieces. So I kind of learned a lesson after we did the liquidity pools work, where, like, we produced this, like, 1500-line CAP, and it was just, like, really unwieldy, and people never knew what parts of things we were talking about, and what the interactions between things were, and stuff like that. So this time- different concept: let's break this problem into all the logical parts, like: this is the configuration and the upgrades; this is, like, the technical pricing algorithm that we're going to use; this is how we're going to get the trades- and those are kind of, like, the four verticals that I'm seeing. And so this one came out first, because it was short and kind of easy, and, like, it was very well established what the space of options was. Pricing will come out in the next few days, and then we'll keep going. But I think Geoff already gave a good overview of what's kind of in this particular proposal.
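As a mental model of what "configuration data" means here, the sketch below expresses the two pieces just named- batch-computation control parameters and an explicit asset list- as a TypeScript type. The field names are invented for illustration; the real CAP-44 structures are defined in XDR:

```typescript
// Illustrative only: these field names are invented, and the actual CAP-44
// structures are XDR. The shape mirrors the two needs named above:
// (1) control parameters for the batch price computation, and
// (2) an explicit, validator-upgradable list of admitted assets.
interface Asset {
  code: string;   // e.g. "USDC"
  issuer: string; // issuing account, or "native" for XLM
}

interface SpeedexConfig {
  maxAssets: number;            // hard cap, driven by solver scaling limits
  solverIterations: number;     // rounds allowed for the price computation
  convergenceTolerance: number; // when prices are "good enough" to stop
  assets: Asset[];              // the admitted set, changed via validator upgrades
}
```

Because the asset list would only change through the same validator-voted upgrade path as other network settings, how that list gets curated is exactly the governance question the rest of the discussion turns to.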
So, yeah, and this one actually contains the most controversial part of the SPEEDEX proposal- it's actually related to what + +[40:00] We just talked about, with validators being able to do things that maybe they shouldn't have control of. So maybe we should start right in on that particular topic. Yeah, just a quick question: like, so, when they're determining this list of assets, this is an extra-protocol, just, like, decision that they're somehow making- like, they're saying, hey guys, I think these are the assets that we should add, and then they all- I sort of imagine there's, like, a listing process or something, where- you know, like, we have this whole set of upgrades for, like, changing the transaction set size or whatever- so I'd sort of imagine we'd do a similar kind of mechanism there. But, like, whatever we do, it would be kind of outside the scope of the protocol. You know, whatever the ecosystem wants to do is their system- that's what they should do. My hope is that this list isn't changing, like, every second. That would be really unnecessary. Like, it should be the type of thing that people can coordinate pretty easily, on, + +[41:00] Like, a day-to-day basis. But the real question I have here is kind of, like: are people going to be upset about handing this kind of power to validators? I mean, it's very much, like, not the kind of power that we currently give validators, at least before [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md). It's very much the same kind of power of, like: hey, these assets live in this privileged world where they have access to this feature, and these other assets don't. Do people feel bad about this? Because I have something- I mean, personally- the way- go ahead. I mean, I guess the thing that I would like to at least consider is that, instead of one magically privileged SPEEDEX, you could, at least theoretically, have room for, like, multiple SPEEDEXes, where asset issuers could, you know, by default be included in any SPEEDEX, but could also, like, say which SPEEDEX they want to be included in- so they could kind of revoke permission for being in a particular SPEEDEX. So if + +[42:00] You did this, then this seems like a more Stellar way of doing things. Because then, in practice, like, all the assets anyone would care about will probably all want to be in a SPEEDEX together. But if some other bunch of random assets wanted to be in a SPEEDEX together- like, you know, importance is in the eye of the beholder. No one is to say that, like, assets a, b, and c are less important than assets x, y, and z. It's just that, you know, x, y, and z are happy together, and a, b, and c are happy together. This is an interesting concept- not something that actually came up in the earlier discussion at all. So already I'm intrigued. I guess my first kind of, like, reactionary question to this is, like: the main motivation for breaking out, like, a SPEEDEX list is just computational concerns. So how does that square with having multiple lists? Or having to, like- you could even imagine a world where, like, everybody's + +[43:00] Like, okay, well, you know, like, there's this one list which has this preferred status, but, like, let's put everything else into this other list. Well, because the problem is that the lists themselves would have to be capped, right, because there's super-linear stuff in the kind of number of assets.
Right, and so, if you can- you know, it's cheaper to compute two small SPEEDEXes with, like, 100 assets than one SPEEDEX with 200 assets, isn't it? Certain parts of it, at least. I haven't figured out how to scale beyond some fixed quantity of assets effectively. Which parts are those? So, the price computation- the number of rounds that you need seems to, like, increase somewhat as the number of assets goes up- but the big limitation, I think, is the linear programming follow-up phase. + +[44:00] The hand-rolled solver that I wrote for Stellar over the summer, it turns out, doesn't work very well at all past, like, 20 assets. If you take an off-the-shelf solver, I was getting it to 50 to 80. And then a custom hand-rolled solver, that I had all sorts of, you know, sparse optimizations and everything in- I could push that to 100, but the run time of that really starts to increase very nonlinearly after a while. I wasn't able to push it past, like, 100-ish. So I think some other technique might be needed there. What's the incentive for an asset issuer wanting their asset to be in a SPEEDEX list? You get basically more liquid trading and, or, like, better usability of the asset. + +[45:00] Going on what David was saying- so, one of the things that comes up in SPEEDEX is, like, in the SPEEDEX pricing CAP that I'm still in the middle of working on, based on Geoff's- but, we- spoiler alert- we agreed on this kind of scheme where, basically, like, in the pre-nomination phase, validators nominate a set of prices, and then, in order to see if those prices are, like, relatively sane- you know, we also, like, have all the validators do, like, the standardized computation, using the solver parameters in the config, to try to produce a second thing- and then we choose whichever prices seem like they're better. That second phase requires that linear program that Geoff was talking about, which he doesn't know how to scale up very high. + +[46:00] But if we just said, like: hey, we have this one SPEEDEX that we're gonna do both phases for- we'll do the pre-nomination, and that's how we'll try to produce a really good, reliable set of prices- and then, for all the other SPEEDEX lists, you have to both present the prices and the network flow, already rounded, so we don't need to do the linear program- then we avoid that scaling issue, and then maybe we can do more. We don't do the internal calculation, we just accept those prices- maybe they're good, maybe they're bad- and that's the cost of not being in that main list. But that is a lot fairer than saying, like: you're either in the list or you're not. Nonetheless- I mean, somebody has to do the linear program either way, though, right? Sure, but I guess my argument would be: if you can't produce a solution, you won't trade, right? But it's not necessarily people who want to trade who are + +[47:00] Producing the solution. It's the people who have access to the order book, because they're, you know, validators in the network. But, like- bear with me for crazy ideas- you could very much imagine running, you know, like: hey, you know, we're the people who, like, want the SPEEDEX to happen; we run this web service; all your validator has to do is hit this web service, and we'll spit out the answer for you, so that you don't have to actually do the computation. Well- oh, but the point is, now it's not a- sorry, but is that the validator or the end user who is doing it?
Is it, like- is there a special operation type which is, like, SPEEDEX solution? Or is it, like- right. So the point is, like, you would be disempowering the end users if we're saying, like, now you're kind of, like, reliant on whatever solver your validator happens to be willing or not willing to use. But the flip side of it is, like- there's 90- when I looked at this, when we were working on this in the + +[48:00] Early winter, there were 89,000 and something assets on Stellar. Even if we broke it into groups of 90, to satisfy Geoff's 100 threshold, we couldn't do all of that in the same way- like, we couldn't give everybody the same level of attention. There's not enough time to do it. Well, I think you just have to price it properly, right? I mean, you know, we could have, you know, a super-linear, you know, reserve fee for, like, putting your asset into a SPEEDEX or something, right? It could, like, increase with the number of assets in the SPEEDEX. I think, you know, I'd rather, like, take the actual costs we have and reflect those back onto users, and say, like: this is what it's going to cost the network, so, like, you have to be willing to pay for it- rather than having validators make value judgments about, like, which assets are good and which are bad. I'm sorry- you're saying who would bear that cost? The issuer of the asset would be the one that would bear the cost of being listed in a SPEEDEX list. Not the cost, but the reserve + +[49:00] Requirement. The reserve requirement, potentially- and, again, I'm not saying that- like, you know, I do think that there are kind of spam assets, and there are assets that everybody cares about. But I think that, instead of just magically decreeing the value of an asset, we should have existing asset issuers say: hey, I actually value having a market between my asset and this other asset, right? And that's the sense in which maybe you could end up with multiple SPEEDEXes- like, maybe there's a weird, like, in-game currency market for, like, a whole bunch of those, and then there's, like, an actual real currency one, of, like, stablecoins or something. And, you know, maybe those two markets don't need to interoperate, or they only interoperate on the native asset or something. And then the validators don't- we don't have to be in a position of saying, like, actually, we think, like, euros are more important than, like, game credits or whatever- which, like, we might, but, like, why should we be making that value decision? + +[50:00] So we could say, like, each SPEEDEX batch has, like, an ID or something, and it's not the validators that choose what's in that- it's the asset issuers. They can, like, propose to put their asset into a batch, and then, if you put your asset into a batch, then all the other asset issuers that are there, like, say yes or no, or they have a vote or something, and basically the asset issuers are the ones choosing who's in the batch, and not the validators. Yes, I mean, that is one way to do it. Sort of, like, another way to do it is, sort of, like: each person who joins the batch- it gets, like, exponentially more expensive, and you need an exponentially larger, like, reserve. So it's, like, you know, you can sort of outbid whoever- whoever has, like, the largest reserves gets in- or some combination of the above. I feel like we probably want some amount of input.
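A worked version of that exponential-reserve idea- with purely illustrative numbers, nothing from CAP-44- shows why such a schedule is self-limiting:

```typescript
// Illustrative sketch of an exponentially growing admission reserve.
// `base` and `growth` are made-up parameters, not anything proposed in CAP-44.
function admissionReserveXLM(slotIndex: number, base = 100, growth = 2): number {
  // Slot 0 costs `base` XLM; each subsequent slot doubles.
  return base * Math.pow(growth, slotIndex);
}

// The 10th slot already requires 51,200 XLM, and the 20th over 52 million,
// so the batch size is bounded by economics rather than by a validator's
// judgment about which assets deserve to be there.
console.log(admissionReserveXLM(9));  // 51200
console.log(admissionReserveXLM(19)); // 52428800
```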
I mean, what are the signals that are in some sense egalitarian- at least + +[51:00] objectively egalitarian- but potentially useful? One signal is: does an asset issuer value a market between their asset and some other asset? And another signal is: how many native assets do I have- can I meet a high reserve requirement? If we could factor these two things in: the reserve requirement is an absolute thing, and whether one asset thinks another one is interesting is a relative thing. I think you still need some kind of veto power from the people who are already there. Well, that's what I mean- that's the relative thing I'm talking about. Maybe it's a veto, or maybe it's a preference. I'm going to echo something that Tomer just shared on Slack, which is a concern about creating too much complexity around how these configurations work, making it really hard for people to interact with Stellar just because there's too much going on. + +[52:00] Issuers don't have to do this kind of thing anywhere else. Adding a lot of complexity makes Stellar less appealing to use. That's something worth considering in this design. Yeah- part of the way we can get around that is by having really sensible defaults. But I feel like we at least need to work through some of these designs before we decide they're too complex, or before we give up. I just don't want validators picking winners and losers if we can at all avoid it. So we need to either avoid that or convince ourselves that there's no way to avoid it, and I just don't think we're quite there yet. And maybe a different, related question: what was the reason to + +[53:00] have a limit, right? The only thing you really need for sure is the maximum number of assets involved in a SPEEDEX run. So was there any reason for not just using fees to force this: you basically only keep the top-k assets involved in SPEEDEX, and that set is defined by fees? The reason that doesn't work is that you can imagine, for example, a US government bond. It's one of the most liquid markets for anything in the world, probably on par with other ultra-liquid things like USD-EUR. But I want to focus + +[54:00] on the bonds, because the prices don't change all the time. Prices change, but if you look, and then look again one second later, you might see the exact same prices- in fact, you're pretty likely to. In those cases there might be nobody who's willing to pay a fee to trade, but there'd probably be an incredibly deep liquidity pool that would be willing to trade to help settle all the other trades people want to make. That liquidity pool isn't bidding any fee. So do you just say:
we're not going to include the most liquid market in the world because nobody wants to trade it this exact second? That seems really perverse to me. I think what you want is for all the other assets to say: hey, I want people to be able to trade these treasuries, or whatever, against my asset, because that's a useful feature for users of my asset to have. So that's why you need + +[55:00] some amount of configuration- some kind of input from the asset issuers. Right, we should at least consider a design that has some amount of input from asset issuers. I still don't understand, though. In your example, John, yes, you have this asset that is very stable, and so on. But even if that asset is included in a pool, if people don't make money doing a trade against some other asset- if that trade is not that valuable- how is such pricing going to work? No, it is valuable; it's just not valuable to the asset issuer, right? Suppose the Fed issues a digital dollar on Stellar. I get it- what I was suggesting is that the way you pick the top assets is by actually looking + +[56:00] at the trades that are happening across the network, because you already have to have this function that decides which top-k transactions you're keeping for that SPEEDEX round, and as part of that you can weight things. Sorry, is it per round? I thought this was a more coarse-grained thing: every once in a while, we reconfigure SPEEDEX to have a different set of assets. I mean, there's a spectrum of granularity there. But if we are going to do it at fine granularity, then we should do it through some kind of transaction, as opposed to through a protocol upgrade or the upgrade part of the nomination value. If it's per asset issuer, it should be a transaction and not an upgrade. I think doing it fine-grained is a little dangerous, just because there are going to be a lot of assets that are sporadically traded, and so, + +[57:00] if you try to optimize some max over the transaction set, assets are going to be going in and out of the pool a lot. That's hard to predict for users, so you're going to get a lot of failed transactions, which is a bad experience. Yeah, the CAP mentions that, and I totally agree- I definitely wouldn't want a world where that set changes every ledger. That sounds really impossible to use; everybody would just be frustrated and hate it. Well, at this point we are out of time for today's meeting. I guess, Geoff and John: do you have enough input to move forward? I know we can bring this back to the mailing list too. Is there any final question we need to answer here? Are we ready to move- do you have enough? I think it's pretty clear that people want us to explore other questions about what could be done. I have some pretty strong opinions about why it should be + +[58:00] the way I proposed, but I'm really happy to go explore some other options, write up what they would look like, and what pitfalls we might encounter. It'd probably be kind of fun, honestly.
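+
+The churn concern is easy to see in a toy model- the data and asset codes below are invented: if the SPEEDEX set were recomputed every ledger as the top-k assets by fees bid, sporadically traded (or liquid but fee-less) assets would flap in and out of the set.
+
+```python
+from collections import Counter
+
+def top_k_by_fees(trades, k):
+    """trades: iterable of (asset, fee_bid). Returns the k highest-fee assets."""
+    fees = Counter()
+    for asset, fee in trades:
+        fees[asset] += fee
+    return {asset for asset, _ in fees.most_common(k)}
+
+ledger1 = [("USDC", 900), ("EURT", 400), ("GAME", 350), ("BOND", 0)]
+ledger2 = [("USDC", 900), ("BOND", 500), ("EURT", 100), ("GAME", 90)]
+# Symmetric difference = assets that churned between consecutive ledgers.
+print(top_k_by_fees(ledger1, 3) ^ top_k_by_fees(ledger2, 3))
+```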
So I kind of know what I need to do. Geoff, do you feel the same? Yeah, I agree with that. Well, for me, there are some questions here that I'm not sure we can answer without more detail. How do you actually produce the nomination value? I don't have a clear idea of how we pick which transactions to include- based on fees, it sounds like, but some of the properties you're looking for suggest a more balanced composition, as opposed to just high fees, and I'm not sure. Are you talking about me or about David? + +[59:00] No, about you, John. I'm looking for whatever simple thing we can do that people can agree on. That's why I just gave it to the validators: I thought we'd be able to agree on that, and they could figure out what is good for the network. When Leigh posted on the mailing list about other things we could do, my response was basically: you'd have to convince me that a heuristic would outperform the validators, who care about stewarding the network- assuming we're actually restricting SPEEDEX to a small number of assets. In a world like David's, where we have multiple sets and we're expanding capacity by breaking them up, maybe it's a different trade-off. But if we're going to look at a single block, you'd have to convince me that the heuristic could outperform validators who just want to do what's good for the network. The other question is: would it make sense to have two tiers? You'd have the core SPEEDEX assets, and then other assets that can be traded for any of the, say, 20 core SPEEDEX assets, but without making a market between any + +[01:00:00] pair of non-core assets. SHMEDEX and SPEEDEX. Sorry- we've exhausted Justin, he's trying to kick us out. All right, to be continued. [Laughter] Thanks, everybody. Yeah, to be continued- see you in a couple weeks, and on the mailing list. Everybody out there, thanks for watching. If you're interested, join the Stellar dev mailing list to participate in the discussion as it unfolds. + +
diff --git a/meetings/2022-03-03.mdx b/meetings/2022-03-03.mdx new file mode 100644 index 0000000000..7bc0721a0e --- /dev/null +++ b/meetings/2022-03-03.mdx @@ -0,0 +1,180 @@ +--- +title: "SPEEDEX Lane Configuration and Monitoring" +description: "This discussion revisited CAP-42 and introduced CAP-44, focusing on lane-based fee mechanics and the on-chain configuration of SPEEDEX. The conversation centered on validator discretion, wallet behavior, governance tradeoffs, and the operational limits that shape how SPEEDEX asset sets can scale." +authors: + - david-mazieres + - geoff-ramseyer + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [legacy, CAP-42, CAP-44] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session continued the deep technical discussion around transaction fee lanes and the emerging SPEEDEX architecture. The first half focused on final refinements to CAP-42, clarifying how multi-part transaction sets change fee bidding strategies, what wallets should expect when lanes fill, and how validators can safely roll out lane-based behavior without breaking existing tooling. + +The second half introduced and stress-tested CAP-44, which defines how SPEEDEX is configured on-chain. Much of the discussion explored governance and scalability tradeoffs: who decides which assets participate in SPEEDEX, how many assets can realistically be supported, and whether validators, asset issuers, or market forces should control inclusion as the system scales. + +### Key Topics + +- CAP-42 readiness and rollout considerations + - Lane-based fee policies and revised transaction ordering + - Wallet behavior changes: re-signing vs fee bumping + - Validator-controlled activation and coordination timing + - Communication needs for ecosystem readiness ahead of Protocol upgrades +- Motivation for CAP-44 (SPEEDEX configuration) + - Enabling a fully on-chain, batch-based decentralized exchange + - Removing the need to guess intermediary assets in path payments + - Separating SPEEDEX into smaller CAPs to reduce risk and complexity +- Asset selection and governance debates + - Validator-controlled configuration vs issuer-driven preferences + - Concerns about liability, neutrality, and “picking winners” + - Discussion of veto power, defaults, and issuer opt-in models + - Risks of stake-weighted or heuristic-based asset selection +- Single vs multiple SPEEDEX instances + - Tradeoffs between predictability and flexibility + - Liquidity fragmentation versus scalability constraints + - Possibility of multiple SPEEDEX pools as a safety valve +- Scalability and computational limits + - Price discovery and linear programming constraints + - Practical upper bounds on asset set size (hundreds, not thousands) + - Need to balance performance with market completeness +- Observability and operator tooling + - Validator telemetry and logging requirements + - Horizon and SDK exposure of lane and SPEEDEX configuration + - Monitoring unhealthy lanes and failed submissions post-rollout + +### Outcomes + +- CAP-42 was considered ready for an asynchronous CAP committee vote, targeting inclusion in Protocol 19. +- CAP-44 remained open for further iteration, with follow-up proposals and concrete alternatives requested before advancing. 
+ +### Resources + +- [CAP-0042: Multi-part Transaction Sets](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) +- [CAP-0044: SPEEDEX – Configuration](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0044.md) + +
 Video Transcript + +[00:00] All right, hello everyone, and welcome once again to the Stellar Open Protocol Discussion. These meetings are for discussing Core Advancement Proposals, or CAPs. They're technical specs that suggest changes to the Stellar protocol, and they allow the protocol to advance and evolve to meet the needs of the ecosystem- they allow us to add new features to the protocol. These meetings are live-streamed so that anyone who's interested can follow along. But it's a technical discussion about these very technical CAPs, so if you're watching, you probably want to take a look at the CAPs linked in the show description, along with the mailing list discussions about those CAPs. Today we're once again discussing the two CAPs we discussed last time: [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md)- multi-part transaction sets- which has been revised since our last + +[01:00] discussion, and [CAP-44](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0044.md)- SPEEDEX configuration- about which there have been a lot of questions on the mailing list. This meeting is part of the CAP life cycle, and a lot of open discussion about the CAPs happens on that mailing list, again linked in the show description. So if you're really interested in joining the discussion, please sign up for the mailing list; you can see a lot of these questions come in, and a lot of the answers are given there, or the questions are brought to this meeting. I'm going to end it there. If you want to look up anyone in this meeting and find out who we are, you can see our names; I'll let you do that on your own. In the interest of keeping things moving along, let's kick off the conversation. First up, we're going to talk about [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md), multi-part transaction sets. This CAP would give validators control over which fee policy to use for the individual transactions included in a ledger. We discussed it at the previous meeting, and it was revised again, + +[02:00] so I'm going to kick it over to Nicolas, who made some of those revisions, so he can walk us through the changes real quick. Nico? Yeah, thanks, Justin. The changes I made were actually pretty minor- expected, given where we are- and basically reflect what we discussed at the last protocol meeting. The actual functional change in the CAP is the use of the total- the sum- of fee bids to sort transaction sets. The other change relates to questions we had last time, which were also raised on the mailing list, + +[03:00] around what this means for clients in particular. We have documentation today that talks about using fee stats, or bidding as much as you can, as a strategy, and this needed to be tweaked a little bit. So I put up a possible approach for how to do this after this new CAP takes effect. I don't know if people have had time to review it, but I imagine that maybe Leigh, who I think was not at the last protocol meeting, has some questions.
Yeah, I did listen to the last protocol meeting, and I think the conversation answered a lot of the questions I had- and it continues on the mailing list. The only comment I had about the conversation is that wallets will probably change + +[04:00] the pattern they use for things like fee bumps when they need to bump up the fee they're paying. Realistically, for your average wallet, fee bumps will probably not even be necessary. It'll be more like: you try to submit a transaction, it fails, so you just modify the fee on your current transaction- now you've got a new transaction- and then you sign that again and submit it. The pattern sounds pretty clear, and the only remaining question I have is: do we need to discuss what the rollout will look like? Given that there is some change in expectations for wallets, what sort of lead time do we need to give the ecosystem to adjust to and learn about this new process? + +[05:00] Right. I imagine that today most wallets already deal with failures, like timeouts, when they submit transactions. But I do agree there needs to be some awareness that this is about to change. You probably want to reduce the maximum bid for wallets where people set very high bids, which I think some do- say, one lumen, which is very high by today's standards. So there's probably some communication that needs to go out that people should not put in a very high number that they don't expect to pay, and instead + +[06:00] align with what a lot of wallets seem to be doing- the common fee we see, which is more in the range of 10,000 stroops, or 40,000 stroops, which is still a fraction of a penny. Yeah, I think a lot go with a hundred thousand, which is still fine. Even with this new mechanism- and we discussed this earlier in the morning- the first wave of changes will probably be to make bidding a bit more aggressive for trading activity, since there is more competition there. So if people are submitting at 100,000 stroops, they may actually end up paying that, with this update, + +[07:00] for trading. So I think it's more a communication strategy and documentation that we need to work on. In terms of communication strategy and documentation: are we imagining that this CAP could be included in Protocol 19, or is this something that might be for a subsequent protocol? Oh yeah, Protocol 19. Ideally, I think we can put it in Protocol 19, which, at the current pace, looks like it will land at the earliest at the end of May- which is quite a bit of time for people to adjust their wallets or their wallet settings. Yeah, and I also think the lead time on using the feature + +[08:00] doesn't have to be the same as the lead time on implementing the feature.
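+
+For reference, a minimal sketch of the retry pattern described here, written against the Python stellar_sdk. The endpoint, keys, and fee numbers are placeholders, and exact SDK calls may differ between versions:
+
+```python
+from stellar_sdk import Asset, Keypair, Network, Server, TransactionBuilder
+from stellar_sdk.exceptions import BadRequestError
+
+server = Server("https://horizon-testnet.stellar.org")
+keypair = Keypair.from_secret("S...")  # placeholder signing key
+DESTINATION = "G..."                   # placeholder destination account
+
+def pay_with_fee_retry(amount, start_fee=100, max_fee=100_000):
+    """Rebuild the same payment with a doubled fee bid until accepted."""
+    fee = start_fee                    # stroops per operation (the max bid)
+    while fee <= max_fee:
+        source = server.load_account(keypair.public_key)
+        tx = (
+            TransactionBuilder(
+                source_account=source,
+                network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
+                base_fee=fee,
+            )
+            .append_payment_op(destination=DESTINATION,
+                               asset=Asset.native(), amount=amount)
+            .set_timeout(30)
+            .build()
+        )
+        tx.sign(keypair)               # a new fee means a new signature
+        try:
+            return server.submit_transaction(tx)
+        except BadRequestError:
+            fee *= 2                   # bid higher and try again
+    raise RuntimeError("not accepted below the wallet's fee ceiling")
+```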
So if this went into Protocol 19, but we do some surveying and it looks like that's not a realistic time frame for wallets, maybe the ecosystem could hold off on using the feature for some period of time- because validators would have to decide to use the feature. So there could be some readiness check where, before any validator use of the feature happens, there's a sort of coordination. Yeah, exactly- those things will be enabled by flags on the validator, so there will be an actual rollout. It still means that if, after this change takes effect, one of the validators decides they want to ignore the flags and enable the features anyway, they can, + +[09:00] but there's an audit trail in the archives on the behavior of your validator. Are there any questions about where we are with [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md)? It sounds like we're in a pretty good spot. I think we left it at a good spot last time, and this addressed the things that came up, so it seems pretty much ready to me. I have one nit, but it doesn't actually come up in this CAP: the way we indent our case statements is not compatible with git diff. As a general point, maybe we should either include both the diff and the actual data structures, or reconsider how we indent XDR files. It + +[10:00] just so happens that we lucked out in this diff- there's enough context to read it properly. Sure, but I think that's a formatting issue, which we can talk about offline. Cool- it sounds like this CAP is probably ready to bring to a vote, which I guess we could do at the beginning of the next meeting, since Jed happens to not be here today. We could also vote asynchronously; I don't think we need to wait for the next meeting. I'm ready to vote and move this forward. Okay, cool- I'll schedule a vote asynchronously, and then we can share the results publicly; that way we don't have to wait, and we can push this forward. So, in other words, [CAP-42](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0042.md) is ready to put to the CAP committee for a vote, which is super cool, possibly for inclusion in Protocol 19. But now it is time to move on to the next discussion point, [CAP-44](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0044.md).
So just to give like a quick overview of like what the discussion on the mailing list has kind of been about. The conversations have kind of gone in like + +[12:00] A couple different directions. One question is about: can we practically derive the set of assets that we can use with speedx, rather than trying to configure it? I think the answer to that at this point is probably no, but we can talk about it more then. Another line of conversation has been around the, basically the lane of like: okay, let's say that we do the simplest thing and just you know do a do what's in the CAP now, just like a voting upgrade system, can we give some guidelines to the validators that are actionable and predictable, so that we don't have arguments in the community about like what should be included? And then a third line of conversation has kind of been about like the more general process types of things like: should we change the way that we do upgrades to make this easier and increase visibility and stuff like that. But none of this has resulted in + +[13:00] Any changes to the actual proposal yet, because it's not remotely clear which direction we should go. So, with that kind of overview of where the conversation has gone, maybe we should just like open up the floor and see what people want to talk about. My two cents around the actual like ecosystem stuff around how to make decisions. I don't think this belongs in the CAP I as a starting point. I think we could just discuss- you know, validators can discuss over the values channel on key base of the validators channel on the Discord. There's no need to put too much structure around this- same way that we coordinate protocol upgrades these days. Obviously this is much more. You know there will be some arguments and it's good, but I don't think we need to like: let's see what this looks like before we start solving for a problem that we don't actually know what it's going to look like. I mean, I think the fact that we have, + +[14:00] I mean, I think the fact that we have this debate in these questions, suggests to me that this isn't quite the right design. So I still think that there's multiple stakeholders here. Yes, validators are stakeholders, but so are the asset issuers and I don't think that I think that we should give more weight to the asset issuers than the validators. And to the extent that validators should be important, it's only because they're designated as kind of official repositories of information by the asset issuers. I agree with David, but I don't agree with the result. I think that asset issuers should strive to be validators. I think that's like proper SCP usage and then they will have a vote, but you know, just by virtue of having, like, an asset issued, I don't think that's enough to inform the validator network what to do. But I think, as an asset issuer, you are choosing to. I issue your asset. + +[15:00] You are choosing to. I issue your asset on Stellar because you know, in large part because you want a liquid market between your asset and other assets, right, and so you have an opinion about which markets are important for your asset and that's just not reflected by having the validators make this decision. So what are the mechanics that you suggest for allowing acid issuers to do this, David? Well, I think that, instead of a protocol upgrade, this could be a new type of operation or a new type of ledger entry, which is, the other assets that, like you're interested in being in a speed x with and. 
Once you know, when there's like a kind of mutually satisfactory speed x's, then, like you could, potentially, + +[16:00] Then you could like sort of create the speed x, but it should come, it should. We should somehow have a way for the assad issuers to express which or rank sort of which assets they're most interested in being in a speed x with and but. And we shouldn't necessarily have only one speed x right, because you know, maybe there's, you know multiple disjoint sets of assets and you know who are the validators to say like which one is reality right, like and how are the asset issuers expected to? You know, like this operation, what is it? I think one of the options that we were talking about was like staking lumens- right, are there other options? Or are we just like reverting back to kind of like stake weighted voting mechanisms, the same? No, it could just be It could just be a like a ledger, like you know an account entry or whatever that you say like which assets you + +[17:00] Want to be, like you have trust lines. You could have sort of like you know speedx participations where you like specify almost like a quorum slice of assets that you want to have in your speedx, but every asset's gonna issue, or like lots of assets are gonna issue these and there's- you know, the protocol still needs some way of deciding- like which assets are sort of important. And then maybe there's like some kind of- I don't know max weight matching computation or something going on, but we still probably have to restrict it down to like a certain size or a certain like network objective, right. So there should be a configuration parameter which is kind of like maximum size of a speed x, right? But to me that seems like the only thing. The validators should really be deciding and then it should be the assets themselves that choose to make a market. + +[18:00] What are you wait? Am I unmuted? Yeah, I am. What about the? case of contention: like, take the most liquid assets on Stellar which, like buy by like AMM liquidity, we could say would be like, I don't know, I think it's like USDC, XLM, AQUA, a couple other assets, what, like most assets would want to be able to trade with those, because that's where you get a lot of benefit from speed x being able to swap with the liquid ones. So let's just say that like for the sake of argument to say that USDC has like the most liquid markets on seller. I don't know if that's true, but let's pretend for the sake of argument. And then basically, there's like global contention on wanting to be in the same speedx as USDC. Who do we choose? Well, that gives USDC a lot of cloud. I think USDC should be then should be able to decide, + +[19:00] You know, who gets to be in a speed x with them. Or maybe USDC can be in multiple speed x's. I mean that creates arbitrage opportunity. But maybe it's not ridiculous. Maybe we think that the price of uscc is so stable that if it's in multiple speed x's, who cares? It's the thing that will anchor the valuations of all the other assets. I think we're putting a bit too much responsibility and weight on the asset issuers in this case, like if you look at prior art and other networks, you know like that the issuer just issues their asset and then there's like a governing body for whatever decentralized exchange. Yeah, I understand, and so I would be into like reducing responsibility, but not reducing sort of like the privilege of like a powerful asset issuer. Right, because when it comes down to it. 
+ +[20:00] You know, if USDC still like the problem, is, we might have you, it might be the uscc issues their asset and they just don't care, they don't want to think about speed x if other people have a use for it, that's great, and so it'd be great if, kind of the default word that you could be like recruited into whatever speed x, but that if it comes down to it like you actually have a say in like which markets are more important for your asset. I mean, maybe the problem is I don't have a concrete proposal. So maybe what I need to do in the next week or something is like come up with like a concrete counter proposal for like what this would look like. Or maybe we could kind of a few of us could meet offline and like brainstorm about this. You know, at the very least, I would like to convince myself. Maybe what I'm asking for is impossible, right, but at the very least I'd like to convince myself that it's impossible. Well, and the other thing, like, and I think that's why where I started, when I discussed this on the main list, I was wondering like, + +[21:00] I was wondering like, why does like, if you want to have issuers, for example, have some say in the composition of speedex, that should be part of how that SEP is written, right, for how that set is managed by the validator. I mean it's governance, so you know it's one component. I don't know if it's, if it needs to be kind of baked into the protocol in any way. Really, well, one thing I need to baked in is whether there's one speed x or sort of one or more speed x's, and I, I'm very much of the feeling that you know the whole kind of Stellar philosophy is, like you know, we're not here to tell anyone what the ground truth is. We'll just let you know like you can have some weird quorum slice, that you're some weird quorum, that's an entire, entirely civil attack, and then you can have the real world and like, if they diverge, like no big deal, + +[22:00] Right, and so I would. Similarly, like to be able to have like, if there's assets that are happy being in a speed x, together and they're disjoint, then I don't want to be in a position to decide which are the real assets and which of the fake out. Like validators shouldn't be doing that, I guess, at the same time. Oh sorry, go ahead. No, go ahead, jeff. The validators are the one expending the compute to run it right, so they should have a say at least. I think well, there's fees being collected and I mean you know, yeah, but the there's like a non zero cost of just like running and maintaining speed x. That's more than just like you have the assets in your database. I mean, well, now there's a question of like, should we be compensating validators? Or is like, is it cheap enough to run a validator? That's not a big deal. Well, I'm just worried about like somebody spinning up like a thousand instances or there should be some kind of. We have to process it appropriately, right? I mean? Sure, + +[23:00] I guess it's. I just I prefer to use like market based solutions, like. We think that this like consumes resources on the network. Therefore people should be paying for it. I prefer a marketplace solution to a like validators know best type solution, which is kind of what we're doing here. Let me offer you maybe such a like. It's not quite what you want, I think, but it fits kind of nicely into that space. It's something that I mentioned it both to jeff and Nico offline. 
So, like, the CAP currently written is basically: like validators control the speed x asset set, and there was a whole line of questioning like could we drive the speed x assets, which I kind of was like I have no idea how to do this. I don't know if it's actually even remotely possible, even feasible, and I presented some examples that are like kind of challenging. But when I was looking at that example, one of the things I realized is like there's probably a core set of assets that you'd kind of want in your speedx, no matter what, like you know the world's most liquid assets. You know, I think the + +[24:00] Example I gave for, like the imf, what's their thing called the reserve basket thing. I can't think of the word off the top of my head right now. It's kind of like a good representation of that. Or in america it's like t bonds and you know s p 500, or whatever. You know you'd always want those, just have the liquidity from them. But there's a whole bunch of stuff you might want to trade. That's not those assets, and so a hybrid solution is okay, like dr's- yeah, sdrs, thank you, that's the word I could not think of. I appreciate it. And so one like world you could imagine is a world where, like you know, you have, let's say, we have space for 100 speed x assets. That's like the com, the compute power we have. So then we say like, okay, like the validators can configure 50 and they can configure it however they like, using some off-chain governance protocol, whatever cares. The other 50 are literally what's in the transaction set, determined by fees, just like any other way you get into the transaction set. So in that case the transaction set can still have 100 total speed x assets. 50 + +[25:00] Are fixed and advanced by the validators. The other 50 are just like, literally the ones that appear in that transaction set. If there's 51 in the transaction set, it's invalid. And that basically means, like, the validators don't have like the truth power, they just have the power to say like hey, like we think these are the things that everybody always wants, so we can try to give you consistently good prices. But why don't we, I mean, why- speculate that these are the things people always want? Why not have people actually say what they want, right, so I mean. So here's a like a dumb thing we could do. Okay, let's say there's only one speed x, but we could have? Is we could have everybody like, vote their lumens for, like which assets they most want to see in the speed x right. And then you know, we just take this set of assets that has the greatest vote, Why are we reverted? Reverting to stake weighted voting? This is like, because it's less Stellar philosophy. + +[26:00] No, why is it a anti Stellar philosophy? Because, like, we shy away from the idea of: you know, more money equals more votes. Well, I mean, what we're trying to capture with SCP is like some abstract notion of cloud, right, and you know we want this idea of like. If people are interested in a particular, in a you know particular asset, then and that asset you know, say like, designates a validator, then like that validator is going to have more clout in the network. I mean, I agree, like this stigmatic voting isn't perfect, but it's at least a market based solution. The problem is like you can't have a market based solution to consensus if you don't already have consensus, or you can't. But + +[27:00] I mean there's our proof of stake systems, but they have all these other drawbacks right which we don't have. 
But once we have consensus, then I'd rather have you know, market based solution. Just because we do trust the validators to maintain consensus doesn't mean we should give them lots of extra power too if we don't have to. I think, going back to my original appointment, I think this is a great opportunity to get stakeholders to actually run their own validators- which is what you know, like we've been talking about sap forever- as actual stakeholders running validators. This gives them a reason to do that. I guess that's true, but that it seems a little indirect. I mean, I actually agree with tomorrow's point here, like if you're issuing like an asset that's going to be used by everyone, then like you certainly at least should + +[28:00] Like have a strong incentive to run a validator. Or if you're not, you're sort of free loading off of everyone else running validators. Well, I mean in an ideal world you wouldn't necessarily need to run a validator but you would need to like hire someone to run a validator. So I think, like a reasonable like state of the world in like the three years, is that there are various companies like block, demon or whatever, who run like sort of validator in a sorry anchor in a box type products where you know what you could do, is you could, you don't have to run a value, but you pay them to run the validator for you and they can provide insurance or like you sort of whatever else and it's there's no reason why someone just is like probably a lot of people like running their validators on amazon, right, and so we don't need, like everybody, to run a physical server. We just need everyone to say, hey, like, we trust amazon to like host our validator and like, and that's fine, but it's. But they're at least explicitly saying like, yes, this is the validator that you should trust. Sure, but + +[29:00] There's that contractual relationship there and through that I can tell whoever is running my validator to vote for my asset to be in speed x in whatever way. Right, and you know I'm still funding them to run the validator. But the problem is that now let's say that you know, in three years, you know 40 of all the kind of assets that anybody cares about have like contracted with block demon to be their validator. Right now we still haven't, you know. So now, like, we haven't told block demon like how to wait, like various votes or whatever. So it's now, is now black demon gonna be actually the one deciding which assets are important? This is going to be some term of like you know, negotiation and these like confidential contracts they potentially have with their more important customers, or can we do it in an open, transparent, like you know, reasonable, non discriminatory way where it's just like public how this is happening and the decision is out in the open? + +[30:00] You guys mind if we zoom in on a different aspect of what David was talking about a second ago for a few minutes? Can we zoom in on the one speed x versus multiple speed x question? I think like we have not given a lot of thought to this, did not come up on the like on the Stellar dev mailing list also. I just want to make sure we talk about it at least a little bit. I think it loops back into the other question as well. I can imagine. I think that if we come up, it's possible, if we come up with an answer to the, thing we're just talking about, that it would work more easily if there's a single speed x, but otherwise then you know all sql. We should have multiple speed x's. I would. Yeah. 
My vote is like I don't find the idea offensive. I'm just not sure that we need to be thinking about this right now. I think that if we do get to a point where it looks like the list of relevant + +[31:00] Assets is growing to a point where this is like it's too much for a single speed x, then we can start, you know, contemplating the idea of adding another one, but it seems like getting started with more than one speed x, given the shape of the markets on Stellar. I think it's getting ahead of ourselves. I think that we should absolutely think it's like we literally have, like you know, an XDR diff in front of us, which is actually what we're debating at the end of the day. and so there's absolutely a question of like: should there be an array of maximum length, one such that, down the line, we can, like you know, increase the array size, or should it be sort of hard coded like this to have like only a single speed x. But again, I think it's like preliminary to make that decision right now until we figured out how we're going to choose what the assets are. But I think like an important kind of safety valve of like ensuring that, like we're + +[32:00] Choosing assets in a kind of Stellar and like egalitarian way and market based way is to say that hey, like you know, if there's like complete disagreement in the world, then fine, like have two speed x's, like let's not decide which one is the civil attack and which one are the real assets, okay, let's go back to the other thing for one second. Then, since there's people don't seem to want to talk about this, but let me offer you a thing, a version of the world where issuers actually have some control. It's like a kind of slightly weird version of the world, but it's not one that we've spoken about yet. So in this version of configuration, the validators don't vote on any asset. Configuration doesn't apply at all. Every issuer can like produce a list of assets that they'd like to be in speed x with them. My guess is that most of those lists will be exactly the same, + +[33:00] Or we could even have the validators configure the default list and you only get a different list if you actually put something in that way. If you're happy with the default, you don't have to do anything. My guess is still that they would all be very much the same, but it's possible they wouldn't. And then the way that we get the actual set of assets in speedx is we take every asset that appears in the transaction set. You bid for fees to get a transaction step, so that's very market based and then the way we fill out the rest of the asset set is we go over every asset issuer for each of the assets that appears in the transaction set and we grab the asks from their list. Now, like the reason you have to do that second part is like you definitely don't want to. So there's a different speed x, for every block would have a different set of assets potentially, but actually I almost prefer this. We can even say like you're just not allowed to when you're forming a block, it's like ill formed if your speed x involves more than whatever, the parameter number of assets is right, so any given block + +[34:00] Can only have a certain number of assets being touched by the speedx operations, and that could change from block to block. But hold on, just I want to make before you go, jeff. 
I just want to make sure that I'm on the same page as you, David, do you want the pure anarchy approach where the validator, like the validator nominating the transaction set, chooses all the assets in the set? Or do you want the semi anarchy approach where they choose the asset, like they choose the off the operations, and then you go to the issuers to get to fill out the set. No, I think actually, the idea that, like each you know, you have one speed x per block and it just there's some maximum number of assets that can be mentioned in a given block for the speedx transactions, like, I think that this well so. One point like: why are we talking about filling out the asset set? My instinct + +[35:00] Is that in almost every situation, we're going to be having to prune down to a certain size, not like fill out assets. And then point two, I think is there's a huge value in having predictability in what the assets that's going to be. Otherwise, like I like, I like otherwise that like, maybe you know, some validator says like I'm going to have this at some other validator says I'm going to have that asset and I'm like now I started playing games about like where I send my transactions and then like transactions get, you know, buffered, and then things get front run and then starts getting really messy. We'll have a deterministic set of rules like it'll be whoever like whatever collects the most transaction fees or something. But I can still gain that with the deterministic. I like I had a hard time coming up with like a deterministic rule. That wasn't gainable because I can always submit like transactions that are out of the money or whatever to get my assets in. Maybe I'm just being paranoid, I see. So you're saying like you can. You + +[36:00] See, so you're saying like, you can create standing orders. Right, I can set an order with like a minimum price of like a million or something, and that'll never trade and manipulate this, manipulate the asset selection rules, which then manipulates the prices. I think I mean I'd like to brainstorm about this, maybe offline a little bit, because, like, if there's transactions that you know are not going to like I wonder if you can have a. You know how, like you can have a post limit order only, type order, so, like, maybe those could be free, and then, like, maybe we could have people post, however, and cancel limit orders, like for as many assets as we want, and then we actually choose speed x based on like the. I don't know the depth of the orderbook or the number of transactions or something, because, in a way, you really want speed x for the trend for to like you + +[37:00] Want the ones with the highest number of transactions, because that's where you need the extra efficiency. I don't know if that's true at all. I mean, like you should just revert back to the mailing list where I gave like a whole bunch of examples every time anybody mentioned like a derivable set about how you could game them and I feel so confident that they're all gameable that I'd be willing to wager that for any heuristic that you came up with I could find a way to gain it. I feel pretty confident. Not that much, but I still feel pretty confident. So I don't know, David's come up with some pretty crazy stuff before. I feel like he could maybe come up with something that I couldn't figure out how to game, even though I still believe it would be game. So that's why I'm not willing to really make a big league. 
But my point is, like any kind of heuristic is probably extremely vulnerable and like the burden of proof to get me to want to vote for something like that, to want to support something like that would be convincing me that it's not gameable. + +[38:00] And I think that's going to be really hard. Just like just being honest, I think driving the transaction set is just borderline, so but if there's multiple speed x's, it could make the gaming not a problem. Right, if the way I game, the system is by creating a second speed x, who cares? Right, but we still like to determine the competition. Yeah, it's not like you're creating an infinite number of speed x's either, so at some point you have to pick which one of those feed x's is well. It's not like you have an infinite number of transactions in a block either. So no, but that's, it goes back to it. It's all gameable. I think there's just generally value in having a system that sort of runs predictably here, I agree, but all right, I feel like that. I feel well. Look, this thing obviously is not ready to move, oh well, to be accepted today, + +[39:00] Anyway, right. So I feel like now, and I feel like now I have a challenge to like, at least propose something, or try or figure out why I can't. You know, I think we should also think about or, sorry, go ahead. I did want to answer one question that you had actually before, jeff, which is like: why are you talking about filling out the transaction set? And it goes back to that. Like that example I posted, like it's like five or six or seven or eight emails back now on the mailing list, like it was like an example using like major world currencies and like it was a trade for like pounds, for yen, but basically, like the concern I have is like you end up in a world where, like, imagine that, like you know, imagine that the russian central bank just like happened to announce for some crazy reason that they were going to raise their interest rate from 9 5 to 20- not that this is realistic at all, but like, imagine that would happen. I imagine that there might be a ton of people who wanted to sell their rubles, maybe all for euros and usd, + +[40:00] But like, if you only have those three assets, you're potentially missing out of a lot of like cross liquidity that you can get from area to bro markets, like you know where you use the other AMMs to pull more source and sync through so that you can trade more rubles at a good price. So now imagine that for some crazy reason, USDC doesn't want there to be a very liquid market between rubles and USDC and so they feel like if they're going to be in a speed x, They would prefer to be at a speed x without e, rubles or whatever. That is a good point. I think at least the asset issuer should have a veto on participation. On participation, well, either they should have veto or if they should at least not be in any way complicit like it should be, like everybody else wanted it and they were overruled or something. Yeah, I guess yeah, and also. We've been expressing like: asset issuers want good markets with certain assets and we should also let + +[41:00] Them express the opposite opinion. I don't want to market with this asset. You know, listen, six years. Here's something we could do We could actually have like speed x's owned by account. So we could say like, look, we're actually going to. 
You know, instead of a validator, there's just like someone everybody thinks is important and we're going to let them choose the assets and they're going to create a speed x. And if you don't like that, then you can create your own speed x. And if people start trading on your speed x, great. If not, then you're irrelevant. This makes it harder with the liquidity pools and also, isn't this like more unaccountable than the validator? Well, it's both more and less like we know who it is right. There's now like someone who's running the speed x and if that person needs to comply with like russian sanctions, that person can remove the ruble. And if somebody else, you know, if somebody in russia says, hey, you + +[42:00] Guys suck, I'm going to create a new speed x on Stellar. You know more power to them. They can do that, but if none of the other assets want to move over to the new speed x, then that new speed x with the ruble, it'll be the ruble trading against nothing, right? What if we did something with, like asset issuers issue preferences and then validators issue like weights to different assets and then we compute via some like global optimization like min- I don't know max matching, or something based on the express preferences, or something sounds complicated. At some point you're really starting to ask a lot of people to do a lot of complications. Okay, so well. So what I'm saying is simple. What's wrong with my thing? Like there's just a, there's just each speed x has a dictator, but there's multiple speed x's. So at the end of the day, if the dictator is bad, people will just go to a different speed x. And is there in your world an unlimited number of speed x's that can be created? Because that's our question: how do you stop an + +[43:00] Evil dick or, just like some, someone who should not be creating a bunch of speed ups maybe they were there. You need some like reserve currency or you need some reserve balance to do it or something, or like: well, what we have, I think we're probably fine having like five speed x's. Right, you probably don't want like five million, but this has the nice benefit of giving a particular person accountability for like running the price computation. But it also we need to figure out how to bring the liquidity pools like what. If there's a you know competition between for like liquidity pool liquidity, I think we could just have arbitrage opportunities. I mean like we hope no, but like you can't run this price competition with like. If USDC and euros are both in two different speed x's, then there's like contention on the liquidity pull between the two assets and like maybe there's only maybe the only asset that can be more than one liquidity pool + +[44:00] Is the lumen, because no one's there to like control the lumen, but otherwise the asset issuer has to say which liquidity pool they want to be in. Right. So then we're saying like asset issuers declare exactly which fedex they're in. Yeah, which is back to the first point that you decided you didn't like, or somebody decided they didn't like it. There's one additional point that I want to bring into the picture here, which is, you know, our acid issuers. They're active on several networks and a lot of the activity in the space is kind of like multi chain the. 
I think we really need to focus on our differentiators being high-value differentiators, because every time we add something to Stellar that makes it that much more different from every other network out there, we make it more challenging for these players to actually be idiomatic participants in the Stellar ecosystem. + +[45:00] So we really need to make sure that whenever we add something that's different, it's really worth it. And having asset issuers need to be involved in listing on decentralized exchanges is a big step away from what they're used to doing right now, so I just want to be cognizant of that. I think your point is right, but it's not incompatible. I think what we want to do is give asset issuers the control, but ensure that something good happens for an asset issuer who just doesn't care and hasn't configured anything yet. Maybe USDC can't be bothered to learn about SPEEDEX, but everybody else is interested in that asset, so we need to make sure it makes it onto a good SPEEDEX. Going in a different direction in this conversation: do people actually care? Right now, there's no way to really stop your asset from trading against another asset. + +[46:00] It can happen on the DEX for basically any asset: as long as somebody is authorized to hold your asset, they can trade it for anything they want. So why should we give issuers that additional power in SPEEDEX? I know there's some interest because of scalability, right? Yes- if we could have a million assets in a single SPEEDEX, I would say we shouldn't do any of this. It's only for the scalability problem. I guess my point is that there are two kinds of preferences we've been talking about. One is the positive preference: I'd like to have this liquidity available- or rather, not to me, but to the people who want to trade my asset. That's pretty easy; that's my proposal. The reverse preference- I'd like to not have this liquidity available- is not something really expressible on Stellar today, and I don't know why we should make it expressible. So basically, what you're + +[47:00] saying is you shouldn't let an asset issuer prevent their asset from being traded with the ruble anyway. If they really care, what they should do is KYC their users and go to the appropriate authorities if they have evidence that someone is evading sanctions- and they've KYC'd those people, so, exactly. Okay, I think that's fine- and if they really care, they can use clawback, or they can revoke the trust line to freeze it, exactly. They can basically say: you're doing stuff we don't approve of; it's not in our terms of service. That's true. So really, the only reason we're talking about any of this is because we can't put every asset in the SPEEDEX- it's just for scalability. Just a real quick question about that: is that a functional limit that we can't change? Is that correct, or is there a chance that we could, + +[48:00] through some sort of ingenuity or engineering research breakthrough, change it?
Absolutely, but there are limits. The limit is- just remind me, what is the reason for the limit? That's a great question, and Geoff is best placed to answer it. Right, so there are two things. There's the price computation itself, and then the sort of follow-up correction- linear programming- and basically the runtime of each loop of the price computation is linear in the number of asset pairs, and also you might need more rounds based on more assets. You especially need more rounds of the price computation for assets that are more thinly traded. So there's that scalability problem, and then there's also the follow-up linear programming step. If I just threw a standard linear + +[49:00] Programming solver at it, that seems to top out at like 50 to 100 assets. And then I was running experiments last night- which, you know, caveat that code you write late at night is usually broken- but I was pulling in some other open source libraries that I hadn't played with before, and that seemed to push it to a few hundred, like three or four hundred assets. But after that, you know, you're solving some variant of a linear program, which typically has a quadratic or cubic or some such runtime, so no matter what, you're eventually going to have a prohibitive bottleneck there. There are some things you can play with, and possibly, you know, breakthroughs you can make, but we're doing some more research on that topic. It's something that me and Geoff and a couple other people around SDF were talking about quite extensively yesterday, and it looks pretty clear we can do better than Geoff's original estimate of 100. But getting significantly better than a thousand also probably looks kind of out of reach right now. + +[50:00] Even if we can do better than what Geoff hacked up in the middle of the night yesterday, I wouldn't be confident to say we could do a thousand. That already sounds really high. Maybe we'll find out we're wrong. But 10,000 or, you know, 50,000- those are numbers that are not even really in the conversation; they don't even sound remotely plausible within the limits we're working with right now. To achieve those kinds of numbers, which would still be a fraction of the total number of assets on Stellar, we would be, as David said, doing novel, perhaps unprecedented research. So here's an idea: what if each SPEEDEX had its own orderbook? Sorry, what do you mean? In other words- so one of the problems with the one-dictator-per-SPEEDEX model that I think John brought up is that you could have two dictators, each of + +[51:00] Whom wants to include both USDC and EURT, right. And so now they're sort of competing for the same set of orders. But the person placing the order could also name the SPEEDEX, or we could somehow divvy up the orderbook between SPEEDEXes. I mean, this seems fine to me. I think anytime you have multiple SPEEDEXes with the same assets, you're going to fragment liquidity. Or, actually, what if you just run them in different phases?
I mean, you know, if we only have like five SPEEDEXes, right, if we run the SPEEDEXes serially, then we just have some random order: you do the first SPEEDEX, and then you just use whatever's left over in the orderbook to do the second SPEEDEX, and then the third SPEEDEX. + +[52:00] My real concern is just- I really feel like the returns of doing this are really marginal. That's my intuition, and my intuition's been wrong many times before, so I'm not claiming that what I believe is necessarily right. But my guess is all the really high quality liquid assets appear in the first SPEEDEX, whatever you want to label this first SPEEDEX in some ordering. You know, I'm not saying the first SPEEDEX can't be the one that executes second, right, I understand. But what I mean is, basically, the first SPEEDEX is the place where you want to trade and it has all the value- the first in terms of the liquidity ordering- and then all the second tier assets trade with each other, but because they're all low liquidity assets, the aggregate liquidity is really low. There's no good asset in there, no super liquid asset in there to provide a lot of liquidity, and then anything that doesn't make it into that one has even worse liquidity globally. So basically it's like the VC thing, where the first thing has to do better than all the other things, the second thing has to be better than all + +[53:00] The other things except the first, and so on. So that's why I think there wouldn't be more than five SPEEDEXes, right. But I just don't want to put validators in a position of having to justify particular assets. Imagine there's a really popular asset and it turns out to be a fraud, and you were operating a validator and you voted to include that asset in the thing, right. So now are you liable for that? You could have chosen a good asset, and you chose to make it easier for people to buy this fraudulent asset. What's your responsibility there? I'm not a legal expert, but probably very little. I don't think that, you know, American stock exchanges get sued over listings- well, nobody is suing them, but that's a good example. The stock exchanges have relatively stringent requirements for companies to get listed, and when the companies don't + +[54:00] Comply, they get delisted, right. So if we use that analogy, now validators are actually required to police these assets, right, and to, you know, potentially make sure they're disclosing things and whatever, because otherwise they bear some responsibility. That is a good point. And it's also possible we would get validators just not paying attention and voting no on everything so they don't have this liability, and then we wouldn't have any assets in SPEEDEX. So at this point we only have two minutes left, and I think what I want to make sure we do- it seems like there are some actions that need to happen between now and the next time to move this along, and principally, David, I think it falls to you, because it sounds like you want to come up with some sort of concrete explanation of another way to determine this, or at least try. Let's try. Yeah. So that's something that you're gonna work on.
Is there anything else that needs to happen after this meeting in order to keep the conversation going forward, or is it + +[55:00] Primarily that we should move all the extension fields to the top of the structure? I'm never gonna vote to approve another proposal on which the ext fields aren't the first ones in the structure, but that's not a huge ask, right? Do some housekeeping; we should do something. But for the SPEEDEX configuration proposal, I believe, from everything I've heard, it's time to see if David can come up with an alternative, concrete proposal. Right, and maybe we can meet offline or something. Maybe, John and Geoff, we could talk at some point- yeah, this afternoon or whenever. And likewise, I actually have some intrigue about the proposal I made quite a bit earlier about, you know, taking the stuff from the transaction set and then using the issuers list to fill out the set. I'll write something up on the mailing list about that too, to give a full overview of how that would work and why I think it's not a terrible idea. I think it fits somewhere in between some of these other proposals and David's proposals. So, + +[56:00] All right, perfect. We'll look forward to that stuff, and obviously the sooner you can get it done the better- ideally there's something for people to read with enough time in advance of the meeting. The next meeting is in two weeks. Yeah, that's ideal. All right, cool, thanks everybody. Thanks everyone for watching. If you want to follow along, there's a link there for the Stellar dev mailing list. That's where these discussions happen asynchronously. So please join and watch the discussion unfold, and contribute if you have good ideas. Everybody, thanks so much. Bye. + +
diff --git a/meetings/2022-03-11.mdx b/meetings/2022-03-11.mdx new file mode 100644 index 0000000000..92a4ea371d --- /dev/null +++ b/meetings/2022-03-11.mdx @@ -0,0 +1,229 @@ +--- +title: "Create Innovative NFTs on Stellar with JavaScript" +description: "Hands-on workshop covering how to design, mint, and trade innovative NFTs on Stellar using JavaScript, with a focus on composable NFTs, royalty enforcement, and Stellar-native primitives." +authors: [anke-liu, kalepail] +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This developer workshop walks through how NFTs can be built on Stellar using JavaScript, focusing on practical architecture rather than hype. Speakers explain how Stellar’s account model, authorization controls, and transaction composition enable programmable NFT experiences such as enforced royalties and conditional transfers. + +The session combines conceptual grounding (what NFTs are, how they differ from fungible assets, and why smart contracts matter) with a detailed, end-to-end technical demo. Developers are shown how to mint NFTs, attach IPFS metadata, control asset behavior via authorization servers, and implement royalty payments that persist across secondary sales. + +### Key Topics + +- Overview of NFTs as programmable, non-fungible assets and how smart contracts drive on-chain experiences +- Why Stellar is well-suited for NFTs (low fees, fast settlement, accessibility, and built-in DEX) +- Stellar-specific primitives: accounts, trustlines, Horizon API, asset authorization, and sponsorship +- Anatomy of a Stellar NFT: issuing account, IPFS-hosted metadata, and transfer controls +- Building “innovative NFTs” by combining NFTs with custom transaction logic +- Live walkthrough of a JavaScript project that: + - Mints NFTs on Stellar testnet + - Stores metadata via IPFS + - Enforces royalty payments on every resale + - Uses an authorization server to control asset behavior +- Practical discussion of decentralization trade-offs and future paths (Turrets, upcoming on-chain smart contracts) + +### Resources + +- [Stellar NFT Hackathon on Devpost](https://stellarnft.devpost.com) +- [Stellar Horizon API Documentation](/docs/data/apis) +- [Stellar JavaScript SDK](/docs/tools/sdks) +- [Lightmint NFT Marketplace on Stellar](https://lightmint.io) +- [InterPlanetary File System (IPFS)](https://ipfs.tech) + +
+ Video Transcript + +[01:00] All right, let me know when those streams are live. All right, hello all. Thank you so much for coming, and also thank you for bearing with us. We had a few technical difficulties, but we were able to get started shortly after. You know, welcome everyone to the workshop. My name is Anke Liu, I'm a program manager at the Stellar Development Foundation, and I'll kick off today's workshop and this weekend's hackathon. You might know me from the many emails I've sent you over the past few days, with the result of you either being here in person or watching virtually + +[02:00] And, like, so, again, thanks all for coming, and let's start off with a little bit about SDF. The Stellar Development Foundation, or SDF for short, is a non-profit organization founded in 2014 to support the development and growth of the open source Stellar network and, with Stellar, SDF seeks to unlock the world's economic potential by making money more fluid, markets more open and people more empowered. The foundation helps maintain Stellar's code base, supports the technical and business communities around Stellar and provides thought leadership to regulators and other stakeholders. So what is this network that we're looking to support? Stellar is an open source blockchain network with the goal to provide financial infrastructure that's fast, cost effective, resilient and, most importantly, available and accessible to anyone, + +[03:00] Regardless of where they live, and this is really important. Individual payment schemes are currently not interoperable in the traditional financial system, and this leads to high fees and delays, especially when transferring value, and it's also inequitable, as it excludes billions of people who currently do not have access to traditional financial services. Blockchain technology is challenging us to think about money and value in a new way, presenting the opportunity to make it better, more effective and more efficient. Its use cases go far beyond just investing in cryptocurrencies and, in much the same way that the internet democratized access to information, Stellar uses blockchain to democratize access to the financial system, and it can make sending value around the world as easy as sending an email. Stellar + +[04:00] Was founded in 2014, after Bitcoin and Ethereum- oh sorry, I think my mic caught some of my hair- and handles millions of transactions each day, submitted by businesses, banks, governments and developers like yourselves all over the world to build innovative financial products and services. To date, there are over 92,000 unique assets issued on Stellar and over 6 million accounts in existence, and in 2021 alone the network processed over two billion operations, of which 155 million were payments, and this shows that the network is built to scale and is actively used. And Stellar has an energy efficient consensus mechanism specifically designed for remittances and payments. Transactions are processed within seconds and cost way less than a penny. So what can you actually do with Stellar? On + +[05:00] Stellar, you can issue your own assets as tokens on the network- think stablecoins like USDC, cryptocurrencies, real estate and, of course, NFTs- practically anything. Then you can also trade these tokens peer to peer using a built-in decentralized exchange or the recently added automated market maker functionality.
You can also transform currency as you send it, which is a powerful feature for cross border payments. But let's get back to why you're all here today: the Stellar NFT hackathon. As the main sponsor of this year's South by Southwest Finance 3.0 summit, the Stellar Development Foundation is sponsoring a 48 hour hybrid hackathon, starting right now, that challenges the experienced and newcomers alike to build a project on Stellar that incorporates the use of NFTs. So we have ten thousand dollars' + +[06:00] Worth of excellent prizes in XLM, Stellar's native currency, and these prizes will be awarded to the best implementations of the following challenges that combine the best of art and technology. So let's start with the artist challenge. First up, we have the Stellar NFT artist challenge, where you will create art based on a prompt and mint it as an NFT on Lightmint, which is an NFT marketplace on Stellar. The art prompt that we'll use for this challenge will embody the spirit of the metaverse. There are many different interpretations of what the metaverse actually looks like, so we encourage you to explore your inner artist and create an image, video or audio file that reflects your version of the metaverse, in whatever form it takes. The most stunning NFTs will win awards totaling fifteen hundred dollars' worth of XLM, allocated by the Stellar dev and NFT community, and + +[07:00] To participate in this particular challenge you can scan the QR code or enter the bit.ly 'Stellar NFT artist' link and register. But, more relevant to this workshop, we have the Stellar NFT wizard challenge, where you will build innovative NFT experiences on Stellar- go beyond just buying and selling and explore how these composable NFTs open up opportunities for you as a developer. Prizes totaling 8,500 dollars' worth of XLM will be awarded to the most interesting implementations. So, to all of you watching right now, head over to stellarnft.devpost.com and make sure to register today. The deadline for both of these challenges is Sunday, March 13th, at 2:30 p.m. Central Time. So go out there and build something awesome. And all communications will be in the Stellar developer Discord, including the live chat for today's workshop. So definitely, + +[08:00] If you have any questions, please scan the link or head over to the Stellar developer Discord and ask your questions in the workshop live chat. All of the virtual and in-person attendees should have received an email with that link as well, so if you're not able to scan it, please look it up in your email. And now I'll leave this up for a bit while I give the word to Tyler to actually start our NFT workshop. All + +[09:00] Right, I will add my welcome. Thank you all so much for attending. Thanks for your patience while we got ourselves settled. We're here to talk about NFTs: an incredible economic innovation being developed on the blockchain. But, as with any innovation, there's a lot of funk mixed in with the fresh. It's my aim today to focus on some of the interesting use cases for NFTs made possible by the incredible technical functionality of the blockchain they're built on, specifically the Stellar blockchain. But who am I? My name is Tyler van der Hoeven and I lead ecosystem engineering at the Stellar Development Foundation. I've been at the SDF for just over two years, but have been building on the Stellar stack for almost six years now.
+ +[10:00] I've built many production and experimental products on Stellar, everything from games and developer tooling to educational resources and funding programs. Stellar is a vibrant and expressive tech stack, able to handle an incredibly diverse scope of financial use cases. Before we get into the meat of the talk, I'm going to quickly call out our Stellar developer Discord server as well and ensure that everyone has a chance to join that. Myself and the rest of the DevRel team will be hanging out there this weekend to answer questions and help you as you continue your exciting journey into Stellar. And once you join that Discord, the channel you're going to want to look for is the workshop-live channel for entering your Q&A. We're going to have a couple of sessions for Q&A today, so if you're in that Discord server and you have questions, enter those there so we can get to them later on + +[11:00] During our Q&A. All right, let's talk road map. Where are we headed today? What will we be covering? Our talk is going to be divided into three main sections. We're going to talk about the theory and background- NFTs explained- talk about NFTs more specifically on the Stellar blockchain, we'll have a break in there, and then finally we'll come back to actually create an innovative Stellar NFT. So we'll get into the code. Firstly, we'll dive into NFTs explained more generally. We'll define some terminology, then we'll cover some basic technical underpinnings of NFTs, with a specific aim of ensuring that we lead with the tech of NFTs and not the hype. This is a cart and horse scenario- and we must ensure the order is correct- or we'll end up in the funk arena of NFTs, and that is not where you want to be. Finally, we'll spend a little time covering a specific use case involving creator economies. This + +[12:00] Is an area I believe is ripe for disruption and upset. So, as you're thinking through in your heads what you might build with NFTs, or what you should produce for this hackathon or even looking beyond, this is an area that I'm particularly interested in. The goal here is simply to properly orient our basic understanding of NFTs as technology first, before use case, and then to hopefully ignite some excitement for what the proper orientation will then allow us to build. Next, in section two of our talk, we'll pivot our attention to observing NFTs on the Stellar blockchain. We'll cover some basic Stellar terminology to ensure we don't get too lost in the woods of mumbo jumbo. From there we'll take a closer look more specifically at the Stellar chain itself and its unique benefits as the host chain for your NFT initiatives. Next, we'll spend some time dissecting an actual NFT live on the Stellar blockchain, observing its anatomy, its technical composition. Finally, we'll end this section, + +[13:00] The second section here, actually playing around with the project that we're going to be building later on, after the break. The goal here is to make a convincing argument for Stellar as your chain of choice for your next NFT project, and to crack the door on what it might actually look like in practice. After this, we'll have some time for some questions and then we'll move into a bit of a break. Finally, we'll come back for section three after the break and start working on our project.
For the afternoon, we'll actually dive into the code, covering the layout, design constraints and criteria of our project, to ensure that we're all on the same page for what we'll actually be building. Then we'll dive right into the code as I walk us through the repo that I've constructed for this project. It'll be a guided tour, if you will, through the wonderful world of innovative NFTs on Stellar. Finally, of course, we'll end with that repo and a website where you can all clone it and play around yourselves, in the hopes that the project will serve + +[14:00] As a good launch pad for your own innovative NFT projects. All right, NFTs explained- let's cover some terminology. There are really only three main terms that I feel we need to define at the outset, terms that, regardless of whether or not you agree with me, are fundamental to our understanding of the construction and utility of NFTs, and these terms are fungible, non-fungible and smart contracts. We won't spend a lot of time belaboring these terms. I'm just going to define them for the context of this talk and then move on. So first, fungible: fungibility is the defining characteristic of an asset able to be divided and interchanged indistinguishably from other units of that same asset. You can think about it like drinking from a glass of water. You can sip more or less water, but it's all just water. Each drop is indistinguishable, identical, equal to every other drop in the glass. This + +[15:00] Is like traditional currency. So dollars, dimes, pennies- it's just money: divisible, exchangeable and identical in value. Its utility is to be an avenue of exchange for something else. It is not itself the commodity. Alternatively, you've got non-fungible. Non-fungibility is aiming to be a commodity or a collectible- remember, NFT stands for non-fungible token- so an asset intended to be held and enjoyed, or at least to be a direct vehicle for those types of experiences. Non-fungible assets achieve this in their nature as indivisible assets, where each one is a whole on its own, distinguishable and unique. Where fungible assets behave like water, non-fungible assets behave more like tickets, both opening access to experiences and each one being unique at some level. Even as there may be tickets to the same experience, each one may be for a different seat. Finally, + +[16:00] That brings us to smart contracts, the final essential term in this sort of trifecta of NFTs more generally, and this I define as a decentralized computer function controlling cryptographic accounts. This is the experience engine of our NFT. NFTs are the ticket unlocking the experiences as defined by these smart contracts. This is where all of the innovation lies in our work on NFTs. This is where we begin to do things others could only dream and hope for in previous generations. What blockchain gives us- decentralized cryptographic computer functions- is this ability to unlock an entirely new way to interact and transact within our communities. Let's dive into that a little more. So let's talk about NFTs explained. We start at the fundamental, foundational layer. This is the programmable, + +[17:00] Contractible, decentralized ticket concept. We can move safely, and powerfully, into a new realm of behaviors: asking questions which must always return consistent results. These aren't necessarily new questions, but they've never been decentralized and assured to be consistent.
Before, there was always some third party entity in the middle able to alter state, change the narrative, disrupt the experience. What NFTs give us is a tech stack on which to build consistent, programmable experiences without disinterested or, potentially or eventually, nefarious middlemen, and this is a revolutionary concept. I really want it to sink in. And it's tough, because nowhere else in our world do we operate like this. Absolutely every internet interaction that we have today operates on fluid fundamentals. Prices change, fees change, morals change, availability changes, culture and ease of access change. Nothing + +[18:00] Is consistent. But math is. Computer programs are. Smart contracts on chain are. Tie that to internet experiences and you've just changed, for the better, every online interaction for everyone, everywhere, instantly. Now, if you're not getting it yet, let's look into an example: modern creator economies. Consider all the big social media platforms of our day: Instagram, Facebook, TikTok, YouTube, Etsy, Spotify, Twitch, the app stores. These are all the ways that we entertain ourselves today. These are the platforms where creators get paid. It's where audiences pay to enjoy the art and creativity of their fellow humans. It's a wonderful thing that these platforms exist, but there's one singular, fundamental flaw: the platforms are fluid. There's no consistency or guarantee. The only thing that you can be sure of, as a creator and consumer, is that middlemen will squeeze out + +[19:00] Maximum profit margins between your money in and the creator's paycheck out. It's unfortunate but, now, thanks to blockchain and NFTs, it's unnecessary. The programming, the contracts that run these platforms, could and should be run on chain. Wherever the transfer of value is involved, consistency can and should be added to ensure the link between creators and fans is as close to peer-to-peer as possible, and all of it decentralized such that no single third party can alter the fees, shut down the content and suck as much life out of the experience as possible. So, as an alternative to what we have today, let's consider Lightmint, a decentralized NFT marketplace on Stellar, which we'll observe in use in a bit. The essential monetary transaction programming is all running directly on chain. On Lightmint, the visual interface has been disassociated from the monetary compensation model. Artists and fans can connect + +[20:00] Directly via on-chain NFT data. There's a world of work to be done beyond just marketplaces, though. We really need to move away from just curation and discovery to deeper connections and interactions with those we already enjoy and appreciate. So I really consider this to be a greenfield space for innovation and an entirely new way of looking at creator economies. It's really a creator micro-economy, where interactions and experiences are much smaller and catered more directly between artists and fans- living room jam sessions, kind of- versus just another picture in an endless feed of content. NFTs allow for this. We just need to build for it. So this brings us to our second section, where we move away from NFTs more generally and begin to look specifically at Stellar. Just like with NFTs more generally, there are some terms that we should be familiar with before jumping into Stellar more deeply.
First is Core, which + +[21:00] Is the fundamental software Stellar validators use to run the Stellar blockchain. All the features and functions describing what Stellar can and cannot do live here, in the core program that is Stellar. Next, Horizon: the API access point unpacking Core data into a collection of usable JSON endpoints for clients to connect with, both in the browser and on the server, for the applications we'll build on Stellar. Most of your interactions when you're building Stellar applications will be with Horizon at some level. Another one that you'll see pop up a lot- we talked about it just a moment ago- is Lightmint. This is our premier NFT marketplace, which is pioneering NFT best practices for the Stellar ecosystem. Another one is IPFS, the InterPlanetary File System. It's the de facto decentralized NFT metadata database. It's not a Stellar technology, but it is one that we can easily integrate + +[22:00] With when constructing our Stellar NFTs. That's one you're going to see popping up a lot. Finally, trustlines: this is a Stellar-specific technology. Assets on Stellar exist, for their holders, as trustlines. For an asset to exist in an account, the holder must first opt in by creating a trustline to the asset's issuing account. This is accomplished via a change trust operation, which we'll observe later on when we dive into our project. So these are terms you're going to see popping up a lot in different documentation, and we'll cover some of them in more detail later on. Next, why build on Stellar? There are many reasons you might choose to build on Stellar, not least of which are things like Stellar being fast, accessible, affordable, lean, secure, scalable, compliance-friendly and reliable. However, of all these significant benefits, there's one often unsung hero in this entourage of excellence that I would + +[23:00] Really like to highlight, and that is that Stellar is equitable. Stellar is a purpose-built blockchain on a mission to become the global payment standard, and at the floor of this objective is the fundamental technical requirement that Stellar maintain accessibility. The only way to become a global payment standard is if everyone in the world is able to access and benefit from the chain. From this foundation of accessibility comes reliability, but I don't just mean that the network stays up. I primarily mean the reliability that comes from the actual governance structure of the Stellar chain. Blockchains are expensive, and validators shoulder that cost- the nodes running the core software and actually providing the decentralization of our network. In most other chains, the compensation for this service is provided in the form of transaction fees, where validators are paid by the network for running the network. + +[24:00] It's a reasonable model, but it has one glaring flaw, in that validators are now directly incentivized to ensure the network is maximizing their profit returns for running the network versus using the network. So in this world, high fees and inefficiencies are a good thing for validators. On Stellar, this struggle is nowhere to be seen, because the network validators have the same incentives as the users: they derive profit from their own projects using Stellar, versus from actually running Stellar. Validators are not paid by Stellar. They are paid, same as you, to build profitable businesses on the network.
This fosters a network of low fees and innovation for improving network efficiencies. When we all make money the same way, we're all aligned on the same mission. We have a recipe for a consistent, reliable and accessible network for building long term, legitimate, profitable businesses. Finally, + +[25:00] Let's move away from theory to some practice. What does an NFT on Stellar actually look like? What are the components necessary to pull one off? There are three primary components: an NFT issuing account; the NFT metadata, or the link to the asset; and then the NFT issuing criteria, or the smart contract- things like timed auctions, the royalty payments we'll be building today, also things like holder restrictions if you have different controls on your asset. I'm going to walk through the anatomy of an NFT on Stellar quickly, so we're going to jump off of our slides for a moment. This is over on Lightmint. This is one of my NFT projects that I built way back when I was getting started. It's just a very simple algorithmically created + +[26:00] Pixel glyph. There's some neural network that's running: you give it a little phrase and it'll build you an interesting NFT. So it exists as an image, a title, a description; it's got some tags and stuff. If we scroll down here to our details, you can see the IPFS hash right here. If we were to click on that, it would pull up an actual JSON file. This is a kind of standard way of describing an NFT from an IPFS hash. So it's got that title, description and an image link. If we were to actually open up that image link, you can see the picture here on IPFS. If we open up the actual asset on Stellar- so I'm going to open up StellarExpert, which is a blockchain explorer for Stellar- you can see the asset right here. We've got our issuing account kind of hidden behind this. Right here you can see that IPFS hash that's been associated + +[27:00] With our issuing account. So a Stellar account issues the NFT. Other users can then open up a trustline for that asset, at which point the NFT can move to that account whenever it's sold. There are a couple other pieces of information here. If you scroll down, you can see some of the history of the NFT, some different operations for when it was bought and sold. We've already mentioned the IPFS hash. There you've got that asset code, but all kind of going back to that original issuing account, and then, when you're on Lightmint, it's just providing this visual interface to something that exists on chain. So if Lightmint were to go down, you could very easily spin up some other interface and begin using Horizon to pull in that data and show the NFT again. So + +[28:00] That particular NFT really primarily just deals in those first two elements: an NFT issuing account and the metadata. The issuing criteria, or smart contract ability, of the NFT when interacting directly on Stellar will be somewhat limited in that particular scenario. You have the native Stellar DEX for buying and selling NFTs, but that's about it. If you want to add more conditions to the experience of your NFT, you'll need to look beyond these vanilla NFTs and move into the realm of what I'm calling innovative NFTs, where you combine arbitrary smart contract logic with this basic issuing account and stored metadata. For that we turn to a project that I launched in fall last year, at nft.kalepail.com, where I've got a couple of innovative + +[29:00] Smart NFTs that I've built.
We'll go ahead and take a look at that. I'm going to switch over here again. So I've got a few of them. I became interested in NFTs back around August of last year and began just kind of poking around at what they were, what you could do with them; I quickly got bored of just making art and putting it on chain and wanted to do something a little different, more interesting, more programmed, and that's where smart NFTs came from. The one that we're going to look at today is called The Dig. I've only done three of these, but they're all quite different, so I leave it to you to explore the rest of them. But if we look at The Dig, this is an NFT experience- like a social experiment NFT. It's this singular NFT, this pixel grid, 40 by 40 pixels, and the way that you interact with it is actually by purchasing individual pixels off + +[30:00] Of the grid. So users will come in and use the interface to pick a couple of pixels, the ones that they want to buy, and then, as this NFT becomes minted- as these plot tokens, as they're called, get purchased- you get closer and closer to the final NFT. You can see right here we've actually got a few left, so the final NFT hasn't actually minted yet. It does look like they've kind of uncovered the wizard in the middle, so the secret's been revealed, I guess. If I were to log in with my Albedo account- if you've ever used MetaMask, this is kind of similar- you can see my plot token that I purchased somewhere down in here. So there are a couple of different things going on in this particular smart NFT, or innovative NFT, and + +[31:00] That is that there is an NFT that can be minted, but only after certain conditions have been met, and in this case there are two. First, all of the pixels have to be minted for this final NFT to be minted, and then, second, if you actually want to get this final NFT, you have to hold one of these plot tokens. So only plot token holders, only people who participated in the dig, will actually be eligible to hold or claim this final NFT. And so you kind of set up this arbitrary logic within your smart contract, which creates this interesting experience for an NFT. If you go and read our drop document here, you can see I've outlined all of the different criteria- you know, what's going on for this NFT, the different operations within each command. And if you go and look, there's a gist that has the actual code for the smart contract. There are actually three different commands in this single smart contract: one + +[32:00] For digging up plot tokens; one for minting, which you can't actually run yet because the NFT hasn't been fully dug; and then there'll be another one that'll open up as well once the mint command has been run, where you can issue yourself, essentially for free, that final NFT if you hold a plot token. You can go through these different functions here that actually provide the criteria under which those individual commands can be run. So, again, I've done a couple of these. They've all been a lot of fun, to really start to explore what we can do beyond just putting art on chain.
You can build these little microcosmic experiences, these little games where, yes, it might be pretty focused- there may not be millions of people that interact with this- but you don't need that. If you can build something interesting and something focused, you can get your small community, your people, + +[33:00] To engage with the content that you're generating without having to go through any specific third party. So that's the kalepail collection. It's quite fancy, but it's a little bit too deep for our first stab at an innovative NFT for our time here this afternoon. For that, we're finally going to turn to the afternoon's task at hand, and that's the project that we'll be covering today: an innovative royalty payments NFT project. I'm going to walk through this real quick before we have some questions again. If you've got questions, make sure you're in that Discord; pop those into the chat. We'll answer those in just a moment, but I'm going to walk through the project that we're actually going to be building today. So this is a royalty payments NFT project. + +[34:00] I've got an account over in our gorgeous interface- a user account that I've generated. This is going to be the original minting account for the NFT, or the NFTs, that this account mints. If you remember, each NFT has its own issuing account, so we're going to go ahead and generate a brand new issuing account, which will be the host of our new NFT that we're about to mint. The next thing we're going to need is an IPFS hash, and I'm using NFT.Storage to host my NFTs, the images, and I've gone ahead and uploaded one here. I'm gonna grab that. We've got our new issuing account right here, funded. We'll put in this IPFS hash and then we'll click mint, and when we do, that IPFS hash will be attached to the issuing account that we just generated up above, kind of bundling it all together, and then issuing my user account this + +[35:00] Brand new NFT. So we'll go ahead and do that, and this is actually happening right now on chain. All of these NFTs are actually going onto- it's right now the Stellar testnet, but we've literally gone and minted a brand new NFT on the Stellar testnet. So there it is. That's my NFT. I'm going to go ahead and sell this. I'm going to sell it for 100 XLM. You'll remember that we're actually adding a 10% royalty. So what's 10% of 100? It's 10. So this NFT will actually be sold for 110 XLM. If we go and open up an alternative browser here- I'm going to pull this over; I'm also going to open up another browser + +[36:00] To work with- let's go ahead and refresh this. We should see that NFT available for sale right there. We can see those NFTs available to purchase. I'm going to go ahead and buy this one. So this account will go down 110 here, and the account that actually owns it should go up 110. So we'll make that purchase over on this page. We're able to sell it now because we're the owner. If we refresh this browser over here- and I'm actually just going to shrink this down for a moment- we can see that it now has 110. If we sell this, let's say we'll do 50- they're going to sell their NFT at a loss, bummer- and then we'll refresh this page. We should see 55, because it's the 50 plus the five for the 10% royalty. And + +[37:00] If we buy this, what should happen?
This account should receive 50, and this account should actually receive five, because the royalty payment is coming out of the purchase here, where five goes over to the original minter as a royalty payment and then 50 goes to the user who's actually selling. So we should see this go up by 50 and this one should go up by five. And that's exactly what happened. So our contract is working. The NFT is being sold between these two accounts, interacting directly with each other, not involving the minting account at all, and yet this account is consistently receiving its royalty payments as the NFT is sold by other users. So this is what we're going to be building today, after a little bit of Q&A. So if you're interested, excited, intrigued- I + +[38:00] Know I am- but before we go to that: we've gotten a taste, a nice overlook of NFTs in general and NFTs on Stellar. We'll take a quick moment for some Q&A off of our Discord server, then we'll take a break for about 15 minutes or so, and then we'll come back for our afternoon of actually looking at the programming, looking at the code. All right, do we have questions off of Discord? Are we good? Let's go ahead and start. Oops, let's start with some Discord ones. Nope- no, in fact, the application we'll be building will not be fully decentralized, for the + +[39:00] Sake of making it a lot simpler to initiate. But adding decentralization isn't super difficult, and it's not required for the hackathon. Yeah, sorry- let's go back just real quick to the first question so that I can read that off again for the folks online. Does it need to be fully decentralized? So the question was: does the application, the hackathon app, need to be fully decentralized for it to be valid? And no, it does not. It's fine if it's running centralized, so long as it's actually on Stellar. And then the second question was: do + +[40:00] They need to have a user interface to interact with? There needs to be something that we can actually test and use, and building a GUI is probably a really good way to do that. If you've got API docs, that might be sufficient, but there has to be something for us to actually test and play around with. So I would say a soft yes, there needs to be something to play with. Good question. Anything else? All right, do we have any questions from the audience? Anybody have anything pressing on their mind before we move to a break? All right, perfect, we will move to a break and then come back in about 15 minutes or so and start diving into our actual project. All + +[01:04:00] Right, we're gonna jump back to our seats- or you can stand, you know, if you want. All right, so who's ready to start getting into some code? Before we do, though, I've got two slides left that I want to cover, about both the technical blueprint for the project and the relationship between our Stellar NFT and what I'm calling the NFT auth server. So first, the design of + +[01:05:00] Our NFT. What we're going to build is an application that configures an issuing account, attaches metadata to it, issues that NFT and then, finally, requires that every sale of the NFT sends back a 10% royalty payment to the original minting account whenever the NFT is sold. Next, how we're going to build it.
There's going to be a client interface- this is the website or marketplace for our royalty payment NFT- and we're also going to build an authorization server. The authorization server- auth server- is a Stellar transaction builder that opens and closes authorization for our innovative NFT, ensuring that royalty payments are in fact being issued whenever the NFT is sold. So, finally, tying this all together, let's talk a little bit more about our + +[01:06:00] Auth server as it relates to asset authorization controls. There was a question about decentralization, so let's talk about that just a little bit. Stellar's design includes account and trustline controls. There are a few different types of controls that assets can have, but in our case we're going to configure full control, where an asset exists as an entirely permissioned asset: its movement, its ability to be bought, sold and even held, is entirely under the control of the issuing account. This will allow us to insert our royalty payment logic in between some authorization operations when building Stellar transactions. So the transaction building will be taking place on our auth server. Right now, this auth server will be hosted by us- not very decentralized- but, for those taking notes, this liability can be removed right now via an ecosystem initiative called Stellar Turrets, + +[01:07:00] Which takes the auth server smart contracts that we'll be building and executes them on a decentralized network of servers. Alternatively, the Stellar Development Foundation, SDF, has recently announced an exciting new initiative code-named Project Jump Cannon, which will bring this auth server functionality straight to layer one. So, in a word, smart contracts directly on Stellar. With our NFT design understood and the foundations of asset authorization laid, we can finally turn to our code editor to begin walking through the repo containing our innovative royalty payments project. So I'm going to switch real quick to mirror my displays so that I can code here. So, + +[01:08:00] Inside our project- and I'll give you the repo if you haven't already found it yet; some of you aggressive personalities probably already found my repo, but I'll share it with you afterwards- try and follow with me as we walk through the project, and then, when you get your own version of the repo, you can dive through what we've got. We've got two different directories here. One is our client and one is our server, the client obviously + +[01:09:00] Being the front end of the application and the server being the back end auth server for our application. Let's go ahead and start on the client side, so this would be the website where we load in something like our application. Not sure why that's- there we go. So let's walk through that. I've put some comments in the most used parts of the application. I'm not going to talk about CSS or HTML much today, so we'll go ahead and close those ones up. What I want to walk through more particularly are the actual methods that we're using to mint an NFT and then offer or trade the NFT. Those are the two primary functions happening in our frontend + +[01:10:00] Application, right, where you can mint a brand new NFT and then you can offer it for sale or you can buy it. So there's really just two interactions.
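+
+Both of those interactions ultimately lean on the asset-control "sandwich" described a moment ago. Here is a minimal sketch of that pattern, assuming the stellar-base JavaScript package (the `sandwichOps` helper and its arguments are illustrative, not from the workshop repo):
+
+```js
+import { Operation } from "stellar-base";
+
+// Because the asset is fully permissioned, any custom contract logic
+// (royalties, auctions, holder checks, ...) gets wrapped between an
+// authorize and a de-authorize that only the issuer can sign for.
+function sandwichOps(issuer, trustor, nft, customOps) {
+  return [
+    Operation.setTrustLineFlags({
+      source: issuer,
+      trustor,
+      asset: nft,
+      flags: { authorized: true }, // open the trustline...
+    }),
+    ...customOps, // ...run the arbitrary logic while it's open...
+    Operation.setTrustLineFlags({
+      source: issuer,
+      trustor,
+      asset: nft,
+      flags: { authorized: false }, // ...then lock it back down
+    }),
+  ];
+}
+```
+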
But the first thing, before we can even get to minting an NFT, is when we actually click generate for creating a new public key. Inside of methods, I've got three JavaScript files that I want to walk through, the first one being creating an account. This is triggered by the buttons that you click when you generate or regenerate- kind of create- a new user account, and that lives inside of this method right here. So let's walk through it. And a lot of this- once we've covered one of these, the way that you build Stellar transactions is kind of always the same, so it'll start to feel a little bit repetitive, but that's a good thing. When we initially create an account, what we need to do is generate a brand new key pair. So, lines 9 and 10 here, we generate a new public/private key pair for Stellar and, + +[01:11:00] Since we're on testnet, we can go ahead and create that account so that it exists on chain. Just because you have a valid public/private key pair doesn't mean that it automatically exists on chain. You need to fund it, to get it into a state where it exists on chain and can begin to receive assets, be the issuing account for an NFT or hold NFTs. So we're using Friendbot here to do that- it's kind of like a faucet, if you've ever used those before- where we actually get some free testnet lumens. So we call that. Then we're going to load up the account to get its on-chain state. So after we've created it, we can then call to load up its current state on chain. If we're clicking the button where it's just generating a new user account, we go ahead and exit out of the then statement and we just say, super duper, thanks for the new account, and I'll assign it to my user. That's what happens when you click on this + +[01:12:00] Generate/regenerate button right here. If, however, you are actually creating a new issuing account, we need to do a little bit more. What we need to do is create the account and then actually associate a brand new signer to that account. Stellar has multisig built right in at layer one, where you can attach multiple signers. When you initially create a public/private key pair, the secret key you create for the public key- those two things are connected. When you go and fund it, that private key is what gives you access to do anything with the public key: make payments, send operations, etc. But you can also associate other keys to that account, where they also will have control over that account. And in our case, that's what we need to do. Remember, every NFT has its own issuing account. So in your NFT project scenario, you've got dozens, hundreds of individual + +[01:13:00] NFTs, but each one of those needs to be controlled by this authorization server, where it's kind of inserting itself in the middle, saying: oh, before you do anything, we need to make sure that if a sale is happening, a royalty payment is going back to the original minter. You can't just do whatever you want with this NFT. There's an authorization server sitting in the middle that's ensuring royalty payments go out whenever it's appropriate. And so for that check to happen, you need a signer. But rather than storing hundreds and hundreds of secret seeds in your database, we're going to attach one signer to every single issuing account.
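+
+A minimal sketch of both steps- creating and funding the account, then, for issuing accounts, attaching the auth server's signer- assuming the stellar-sdk npm package and an environment with a global fetch (helper names are illustrative):
+
+```js
+import * as StellarSdk from "stellar-sdk";
+
+const server = new StellarSdk.Server("https://horizon-testnet.stellar.org");
+
+// Generate a brand new keypair and fund it via Friendbot so the
+// account actually exists on chain (testnet only).
+async function createTestnetAccount() {
+  const keypair = StellarSdk.Keypair.random();
+  await fetch(`https://friendbot.stellar.org?addr=${keypair.publicKey()}`);
+  const account = await server.loadAccount(keypair.publicKey());
+  return { keypair, account };
+}
+
+// For issuing accounts only: attach the auth server's public key as an
+// additional signer, so it can later authorize (or refuse) transfers.
+async function addAuthSigner(issuerKeypair, authSignerPublicKey) {
+  const issuer = await server.loadAccount(issuerKeypair.publicKey());
+  const tx = new StellarSdk.TransactionBuilder(issuer, {
+    fee: StellarSdk.BASE_FEE,
+    networkPassphrase: StellarSdk.Networks.TESTNET,
+  })
+    .addOperation(
+      StellarSdk.Operation.setOptions({
+        signer: { ed25519PublicKey: authSignerPublicKey, weight: 1 },
+      }),
+    )
+    .setTimeout(30)
+    .build();
+  tx.sign(issuerKeypair); // the issuer's own key approves the new signer
+  return server.submitTransaction(tx);
+}
+```
+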
So for every single new NFT that we mint, we're going to add this same authorization server signer, which will now act as the intermediary, sort of saying: okay, I have permission to sign for this NFT anytime you're trying to do something with + +[01:14:00] It. And so before that can happen, we need to add this signer to each of these new issuing accounts, and that's what we're doing right here, on lines 28 through 33: we're adding this new set options operation. So right here, in this if block, we're building a new transaction for the new account that we just created. We're on testnet- here you can read a little bit more information about fees or network passphrases- and then on this transaction we're adding a single operation, the set options operation, where this signer is added. This signer comes from an environment variable on the client side, which is the public key for the auth server's signer- that auth server signer again being the single signing key that will be attached to every single new NFT that's minted. So now the auth server has control over every single NFT that's being issued. Hopefully this is setting off some red flags in your mind. This is where the decentralization + +[01:15:00] Piece will come in, right? You don't want one account controlling every single NFT on your platform. So in the Stellar Turrets model, which I mentioned before, you actually add lots of different signing keys, each holding part of the weight of the threshold, right? So if the account has, let's say, a threshold of 10 and each of these signers has a weight of one, you would need 10 to agree before anything could happen with that NFT. In the on-chain smart contract model, the actual account itself would be controlled by the code. There wouldn't necessarily be some signer; it would be the contract itself which would control the account. The contract would act as a signer. So this right here: super useful, really interesting, but this is where the decentralization piece becomes really important if you were to try and ship this more widely. Moving on: after we build our transaction, we can then get it signed and + +[01:16:00] Then submit it to the network, where this issuing account now has the signing key for the authorization server. At that point our account is fully ready to have NFT data associated with it, and that's where the api mint command comes in. So after we create our account, we go to api mint. Whenever you have your IPFS hash input, there will be a button for minting that will pop up. We don't want to mint until we actually have an IPFS hash. So if we actually were to click this- I don't think 'abc' is a valid IPFS hash, but my check is not particularly intelligent- there's something here, and so then the mint button becomes available. That button fires off this api mint function. Inside here, all we're doing + +[01:17:00] Is calling our authorization server, because we need to set up this new issuing account with the IPFS hash that we want to attach, which becomes the representation of what our NFT actually behaves as. We're going to look at this on the server, but this is a fetch command that's made to our auth server. We're going to pass it the user account, the issuer account and that IPFS hash that we input, and then it goes to the auth server, which will check that everything looks fine, and the auth server is actually going to build the transaction.
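+
+Sketched out, that client-side hand-off might look like this (the `/contract/mint` endpoint and payload field names are hypothetical stand-ins for whatever the repo actually exposes; `server` and the keypairs carry over from the sketch above):
+
+```js
+// Ask the auth server to build the mint transaction for us.
+const res = await fetch("http://localhost:4000/contract/mint", {
+  method: "POST",
+  headers: { "Content-Type": "application/json" },
+  body: JSON.stringify({
+    userAccount: userKeypair.publicKey(),     // who receives the NFT
+    issuerAccount: issuerKeypair.publicKey(), // the NFT's issuing account
+    ipfsHash,                                 // the metadata CID to attach
+  }),
+});
+const xdr = await res.text();
+
+// The server returns an XDR envelope; the user adds the final
+// signature locally, then submits the transaction to the network.
+const tx = StellarSdk.TransactionBuilder.fromXDR(
+  xdr,
+  StellarSdk.Networks.TESTNET,
+);
+tx.sign(userKeypair);
+await server.submitTransaction(tx);
+```
+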
So we don't build the transaction on the client side. We want to be really sure that the transaction being built is exactly what we want. It's kind of like building a smart contract on the fly, if you will. So the request goes to the server, which will build this XDR, this Stellar transaction, according to whatever your smart contract logic is, and then send that + +[01:18:00] Transaction back to the client for any final signatures that might be needed for that transaction to be valid. In our case, the user needs to sign it, and we'll look at why that's the case in a moment, when we go to the server to see what transaction is actually being built. Finally, then, just like before, once it's signed, we can submit it to the network. At this point, I think it's probably worth actually going to the server and looking at what's going on here. There's not a lot going on inside of our server. We have some environment variables which denote those signer secrets. There's also a sponsor secret, which we'll look at in a minute, but this is the signer secret. This is the actual signing key that will be the authority for all of the NFTs issued from our project. This is the signing key that will be added to any XDR- Stellar transaction, XDR, same thing- that gets generated from the back end of our application, + +[01:19:00] From our auth server. We've got an index here, which is our router, just a very simple collection of routes. You can see contract right here, and then, based off of the command of mint or offer, there's a little switch statement: if it's a mint, it runs the mint function; if it's an offer, it runs the offer function. And those exist inside of the contracts directory. So let's go ahead and look at our mint function here on the server, and this is going to look kind of familiar. Remember that the Stellar SDK that we're using- stellar-base, in this case- works on both the client and the server side, so what you build when you build transactions on the client will be very similar to how you build them on a server. We instantiate the function- this mint function- and we can gather our arguments. You'll notice these look very similar + +[01:20:00] To what we passed in on the client side. We gather those and put them into some consts. We instantiate NFT as our asset. This is going to be the NFT asset that we're actually going to issue, that this issuing account is going to send out. It's going to be called NFT; that'll be the code for it. And then we go and make a quick fetch on Horizon itself, looking up this account- got to make sure that it actually exists. And then, once we pull in that information, we can begin to use that account's information, so that the user account is going to behave as the source for any fees that are incurred when we actually submit the transaction to the network. And then, once we have that, it's time to actually begin building out our transaction.
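+
+A rough sketch of that setup half of the mint handler, assuming stellar-base on the server and a global fetch (argument names are illustrative):
+
+```js
+import { Asset, Account } from "stellar-base";
+
+async function mint({ userAccount, issuerAccount, ipfsHash }) {
+  // The NFT is just a regular Stellar asset: code "NFT", issued by
+  // the one-off issuing account we created for it.
+  const nft = new Asset("NFT", issuerAccount);
+
+  // Look the user account up on Horizon directly; it must exist,
+  // because it acts as the transaction source and pays the fees.
+  const res = await fetch(
+    `https://horizon-testnet.stellar.org/accounts/${userAccount}`,
+  );
+  const { sequence } = await res.json();
+  const source = new Account(userAccount, sequence);
+
+  // ...the atomic chain of operations is built from `source` next...
+}
+```
+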
The whole thing succeeds or fails together, and this allows you to build some really complex and relational logic into your transaction. You can include up to a hundred operations, so that's a lot of logic you can pack into a Stellar transaction, and again it will all succeed or fail together. You don't have to do the 'oops, it failed, now we need to reverse back up the chain and undo whatever we did before.' And this becomes really important when we start getting into the offer contract. Right here we've got a collection of these different operations inside of our mint contract, and the first one you'll notice is set options, where we are setting flags, an inflation destination, and the source. The flags here: this is where we're actually locking down the issuing account. We're saying this asset is an authorized asset, a controlled asset. No one's allowed to do anything + +[01:22:00] with this asset unless the issuing account permits it, unless we say so. There are a couple of different flags that you can set inside of set options; you can look those up later. But 15 is kind of our max: right now, as the issuing account, I'm allowed to do anything; I have full control over this asset. The inflation destination: Stellar used to have an inflation mechanic that was retired after the ecosystem decided it wasn't all that helpful, but the field still exists on Stellar accounts, so I'm kind of co-opting it. I'm taking it over and using it to store the address of the original minter, so that when we come back in the future and we're trying to make offers, I know who I'm supposed to send the royalty payment to. This is where I'm going to store that, so I'm going to go ahead and include it right here: the user account, the original minter. Next up we've got a manage data operation where that IPFS hash is being stored. + +[01:23:00] Nothing fancy going on there: you've got a name and a value, so an ipfshash key to look for, and then the value of that. Next, we're going to change trust. If you remember trust lines, this is where they come into play. We're going to put this on the user account. This is opening up trust for the NFT, right? After this minting transaction the NFT is going to exist, but the user still needs to trust it. The user doesn't hold the NFT yet; the issuing account needs to send it, but it can't send it unless the user trusts it first. So that's what we're doing here in change trust: we're opening up a trust line for the NFT. Next there are trustline flags. This is kind of the magic part of an authorized asset: anything that exists between the set trustline flags operations, authorized true and authorized false, is the 'what do I want to allow that I couldn't if this asset were not authorized.' Because + +[01:24:00] this is an authorized asset, I'm now allowed to insert arbitrary logic in between. In our case it's a royalty payment. So I'm trusting the user account to hold this asset: yes, they're allowed to. And inside of this authorization I'm making a payment: now that the NFT is trusted, I'm going to send it back to the user, the one NFT that they're going to hold. And then I'm going to lock that asset back down immediately, so the asset exists in the user account, but they're not allowed to do anything with it.
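+
+Putting that chain together, the mint transaction's operations might look roughly like this sketch. The talk sets the flag value to 15 (every flag); only the required and revocable flags are shown here, and the `ipfshash` key and variable names are assumptions:
+
+```js
+const {
+  Operation,
+  Asset,
+  AuthRequiredFlag,
+  AuthRevocableFlag,
+} = require('stellar-base');
+
+// Assumed inputs: the minter's account, the new issuing account, and the
+// IPFS hash the client passed along.
+function buildMintOperations(userAccountId, issuerAccountId, ipfsHash) {
+  const nft = new Asset('NFT', issuerAccountId);
+
+  return [
+    // Lock down the issuer: the asset now requires authorization and can
+    // be revoked. The retired inflation destination field stores the
+    // original minter's address for future royalty payments.
+    Operation.setOptions({
+      setFlags: AuthRequiredFlag | AuthRevocableFlag,
+      inflationDest: userAccountId,
+      source: issuerAccountId,
+    }),
+    // Store the IPFS hash as a key/value data entry on the issuing account
+    Operation.manageData({
+      name: 'ipfshash',
+      value: ipfsHash,
+      source: issuerAccountId,
+    }),
+    // The user opens a trustline so they're able to hold the NFT
+    Operation.changeTrust({ asset: nft, source: userAccountId }),
+    // Authorize the trustline...
+    Operation.setTrustLineFlags({
+      trustor: userAccountId,
+      asset: nft,
+      flags: { authorized: true },
+      source: issuerAccountId,
+    }),
+    // ...send the one and only NFT to the minter...
+    Operation.payment({
+      destination: userAccountId,
+      asset: nft,
+      amount: '1',
+      source: issuerAccountId,
+    }),
+    // ...and immediately lock the asset back down
+    Operation.setTrustLineFlags({
+      trustor: userAccountId,
+      asset: nft,
+      flags: { authorized: false },
+      source: issuerAccountId,
+    }),
+  ];
+}
+```
+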
They can't sell it. The only way they're going to be allowed to do anything else with this NFT is to go back to the issuing account and get authorization to do something with it. So at this point, again, because it's atomic, you have this chain of logic of these different operations: we open up the trust line, we authorize it, we send the payment, and then we deauthorize it. So the user has it once + +[01:25:00] this transaction executes, but they can't do anything with it. Finally, we bundle up this whole transaction into the XDR that we send back to the client. So this is where we take that transaction and convert it. XDR is kind of like JSON, just a little more compact: same sort of thing, where you're taking some instructions and encoding them into a nice little package that you can send back to the client. So that is our mint method. At this point, once we've minted an NFT, the original minter holds that NFT, but they can't do anything with it except whatever the authorization server allows for. And the only thing the authorization server allows for, in our case, is this api offer function. This is where you can start to get really creative. The process + +[01:26:00] that I've used for minting will be pretty much the same for everyone: you need an authorized asset so you can control what it's allowed to do, so that people can't just bypass your royalty payments or whatever other logic you have. But once you know that you have an asset that is auth-controlled, you can insert whatever kind of logic you want. In our case, we're going to do royalty payments. But as you've seen, previously I had a whole bunch of random, arbitrary logic inserted between the authorize-open and authorize-closed operations in different transactions. So this is where you're really going to start to look at: what kind of creative, interesting things do I want to do? In my case it's royalty payments. So let's look at that on the client side. We're back inside the client application. We've got this api offer method, so let's just walk through it briefly. You've + +[01:27:00] got a sell side or a buy side. So there are two things that can happen when you have an NFT: either you're going to be selling it or you're going to be buying it. Those are the only two things you can do, and the only way you can buy it is if somebody else is selling it. So we're going to assume that if you have it, you're selling it, and if you don't have it, you're buying it, right? It's a one-of-one NFT, so if it exists out there as an offer, you're going to be trying to buy it; otherwise, if you have it, you're going to be trying to sell it. So here, if you're selling it, you can enter the sale price. I don't actually have any NFTs at the moment. I think this account holds it, so I can go and sell: that little modal pops up, we'll put in that hundred, and then this one will be for sale, and we can move into + +[01:28:00] seeing that on this page here, where we have the option to buy it. So when we sold that, this is the modal that popped up. We were on the sell side, we configure our price, and we make sure that the price is actually something valid.
We need to go and find the issuing account, because that's the account that holds the information about who the royalty payment should be going to. It's also going to tell us what NFT you are selling and whether you actually own it: do you have the right to sell it? Next up, we actually call the auth server's contract offer endpoint. We had mint, and we also have offer; this one is offer. We've got a couple of different arguments here: your user account, the issuer account, and the offer ID. You can delete open offers; otherwise you'll be creating a brand new one. Then the price that you're inputting, in our case that was 100. And + +[01:29:00] then, depending on whether it's a buy or a sell, you're either going to be buying an NFT or buying XLM, right? So it's always going to be putting out this offer of: I want to give an NFT and receive XLM, or I want to get an NFT and give XLM. Some of these things are hard-coded; if you wanted to get more complex and do other assets, you absolutely could. It doesn't have to be done in XLM. You can use other assets; there are loads and loads of them, as you saw previously in our example. To keep things simple, we are doing XLM for NFTs, but just be aware that it's not that difficult to open up access for assets other than just native XLM. Then, just like before, we're going to make that call to the auth server. We're going to check everything, make sure it's all legitimate. Then that auth server is going to build a transaction and send that XDR back to the client, which will then sign that transaction + +[01:30:00] and submit it to the network. At this point, we should actually look at that particular contract over on our server. So this one right here is the offer. There's a little bit more going on here, but it's still not that complex. Essentially, we're trying to achieve the same thing: we're trying to build an XDR which is either going to sell an NFT or buy an NFT, depending on the state and the requester, the actual user account that's coming in, and whether that user holds or is looking to hold the NFT. So we set up some very basic checks. We get the parameters from the request (you can read some of the comments there), and then what we do here is very similar to what we did before: we go to Horizon and load up + +[01:31:00] the account. The user account is going to be the source of this transaction, so again they will consume any fees for this transaction. Then we're going to look up the NFT balance for the user account. Again, this is going to be the toggle: if you hold the NFT, you're going to be a seller; if you don't have the NFT, you're going to be a buyer. And regardless of whether they're buying or selling, you can't do anything with an NFT if you don't hold a trust line for it. Remember, we need that trust line opened to be able to do anything with this NFT. So the very first thing we're going to do is check: if you don't have an NFT balance, we definitely need to open up a trust line for that. We do that first. The next thing, again regardless of whether you're buying or selling: you need permission from the auth server to do anything with this asset. So we're going to open up that trustline flag again and say: as long as this trustline flag is open, so long as you have permission, you're authorized. + +[01:32:00] Anything that comes next you're allowed to do.
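+
+A sketch of that server-side setup, under the assumption that the balance lookup and function names below are illustrative rather than the project's actual code:
+
+```js
+const StellarSdk = require('stellar-sdk');
+
+const server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+async function buildOfferSetup(userAccountId, issuerAccountId) {
+  const nft = new StellarSdk.Asset('NFT', issuerAccountId);
+
+  // The user account is the transaction source, so it pays the fee
+  const userAccount = await server.loadAccount(userAccountId);
+
+  // Holding the NFT means selling; not holding it means buying
+  const nftBalance = userAccount.balances.find(
+    (b) => b.asset_code === 'NFT' && b.asset_issuer === issuerAccountId
+  );
+
+  const ops = [];
+
+  // No trustline yet? Open one first; nothing works without it
+  if (!nftBalance) {
+    ops.push(
+      StellarSdk.Operation.changeTrust({ asset: nft, source: userAccountId })
+    );
+  }
+
+  // Buying or selling, the auth server must authorize the trustline before
+  // anything else in this transaction is allowed to happen
+  ops.push(
+    StellarSdk.Operation.setTrustLineFlags({
+      trustor: userAccountId,
+      asset: nft,
+      flags: { authorized: true },
+      source: issuerAccountId,
+    })
+  );
+
+  // ...the sell or buy branch's operations follow, and the transaction
+  // ends by setting the flags back to authorized: false
+  return {
+    ops,
+    selling: Boolean(nftBalance && Number(nftBalance.balance) > 0),
+  };
+}
+```
+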
And then we'll lock it back down again at the end. So we open that up regardless: whether you're buying or selling, you're going to need permission, so we open up that authorization, and then we do the toggle of whether you're buying or selling. If you have an NFT balance and that balance is greater than zero, if you hold the NFT, then you are going to be trying to sell it. We begin with the sponsor account; we'll get to this in just a minute. It's actually something very helpful when we're starting to deal with Horizon and showcasing data, so we'll skip it for just a moment and come back to it. But if you're a seller of the NFT, the thing we really need to do is manage a sell offer. There's a Stellar operation called manage sell offer, where you take a selling + +[01:33:00] asset and a buying asset, an amount that you're trying to sell and the price that you're trying to sell it at, and then the offer ID, which in our case is going to be zero. If it's not zero, you're going to be deleting the offer, but if you're creating a brand new offer, it will be zero. So essentially, you'll be selling the NFT, buying XLM: you'll be selling one whole NFT for whatever price you set on the client side. And then we're going to close that sponsorship again; we'll come back to that in just a bit. So that's the if statement. If you are selling the NFT, the only thing that you need to do is get the NFT up on the Stellar DEX, to actually put it up for sale so that other people can see it and offer to buy it. If we skip this else statement, the only other thing that exists in that transaction is to close the trustline flags that + +[01:34:00] we opened up at the very beginning. We close that. So anything that happens in between — in this case, the only thing happening is opening up that offer — we lock down. Now the offer exists on the DEX, but you can't do anything else with it. You've been given permission by the auth server to put that NFT up for sale, but nothing more. So that XDR, that transaction, goes back to the client, and then it can be signed and submitted. That's what we did over here when we created the offer. You can see delete offer: that would take the offer ID and send it back up, saying, hey, we're going to delete that offer, and it's removed. If we go back and refresh, we'll see that there are no NFTs available to purchase at the moment. So let's go ahead and put that back up. So now we've got the ability to sell and delete offers as an owner of an NFT. But what if we want to buy an NFT? In that case, either we don't have a trustline open yet for this NFT, or we do, but it's zero. + +[01:35:00] We don't actually hold the NFT yet. In that case, we're inside of our else statement, right? Either we've got no NFT balance or the balance is zero. That's when this else statement will fire. So in this case, what do we need to do? This is where things get a little bit interesting with our inflation destination. When you hold the NFT and you're offering it for sale, there are no royalty payments involved, right? I don't want to make a royalty payment if the NFT is not actually changing hands, if nobody's actually purchased it, if it hasn't swapped. So in that case no royalty payment is being made.
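+
+The sell branch's core operation could look like this sketch; the price value and function name are illustrative:
+
+```js
+const StellarSdk = require('stellar-sdk');
+
+// Assumed inputs: the NFT asset, the seller's account, and the price
+// string entered on the client, e.g. '100'.
+function buildSellOp(nft, userAccountId, price) {
+  return StellarSdk.Operation.manageSellOffer({
+    selling: nft,                      // the NFT goes up on the DEX
+    buying: StellarSdk.Asset.native(), // in exchange for XLM
+    amount: '1',                       // the one whole NFT
+    price,                             // the sale price from the client
+    offerId: 0,                        // 0 creates a new offer; an existing
+                                       // id with amount '0' deletes it
+    source: userAccountId,
+  });
+}
+```
+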
The only time we actually want the royalty payment to be made is when a payment is being made, when the NFT is changing hands, and in that case we need to look up the inflation destination. That comes from our issuing account; it's been stored inside the inflation destination from when + +[01:36:00] we were originally minting. So we grab that inflation destination: that's what's going to be used in our royalty payment here, as the second op. But for the first op, we're actually using a path payment strict receive. You might wonder why we're not using just a counter offer, and that's because we have to be absolutely sure, in this case, that the NFT is actually changing hands. Can you imagine if we made a royalty payment using an offer, but the offer wasn't actually taken, while the royalty payment was? You would lose the money from your royalty payment, but you wouldn't actually get an NFT. Offers don't have to be taken, necessarily: maybe the offer existed but was deleted right as you were trying to take it. Your offer went up to buy an NFT, but there now is no NFT to take. If we were just making an offer, the royalty payment would still go through: there's now an offer to sell the NFT, but nobody at that point in time is selling the NFT, and yet your royalty payment got + +[01:37:00] taken. That would be unfortunate. So to cover that case, we're actually doing a path payment strict receive, and this consumes an offer. It is guaranteed to consume an offer for whatever criteria you put inside, and if it doesn't succeed, if you don't meet the criteria of the path payment, the actual transaction will fail. So this is a really useful way to ensure that the NFT has actually changed hands before the royalty payment is consumed. So in this case, what's our send asset? We are sending XLM, and we're sending up to the price the NFT is listed at — in our case that's 55 lumens; actually, at this point I think it might be 110. Regardless, that's the max we're willing to send. The destination is our user account: the user is actually doing a self-payment. If you've ever done a market order on Coinbase or something, this is kind of what we're doing. We're essentially doing this auto-swap of an asset. We're making + +[01:38:00] a payment to ourselves where XLM goes out and an NFT comes back. The destination asset: what do we want to receive? We want to send XLM and have an NFT magically come back, and we are required to receive one NFT, the whole NFT. If we don't, then this operation will fail, the whole transaction will fail, nobody will receive royalty payments, and maybe we'll try again — maybe the seller of the NFT has pulled back and isn't actually selling it anymore, and the whole thing fails. If we do not receive this one NFT, then nothing else will succeed. Path payments can be used for other things, where you can have multiple paths, multiple hops to go through; in our case we're just doing XLM to NFT, so we don't need any paths. And then finally that source: again, we're paying ourselves, sending XLM and receiving an NFT. This is a way to consume an open offer, but guarantee that we consume it, otherwise the + +[01:39:00] whole thing fails and we don't accidentally make a royalty payment without receiving our NFT.
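+
+The buy branch's guaranteed swap could look like this sketch; the amounts and function name are illustrative:
+
+```js
+const StellarSdk = require('stellar-sdk');
+
+// The buyer pays themselves: XLM goes out, and exactly one NFT must come
+// back, or the whole transaction fails and no royalty is paid.
+function buildBuyOp(nft, userAccountId, price) {
+  return StellarSdk.Operation.pathPaymentStrictReceive({
+    sendAsset: StellarSdk.Asset.native(), // we spend XLM
+    sendMax: price,                       // up to the listed price
+    destination: userAccountId,           // a self-payment, like a market order
+    destAsset: nft,
+    destAmount: '1',                      // we must receive the whole NFT
+    path: [],                             // direct XLM -> NFT, no hops needed
+    source: userAccountId,
+  });
+}
+```
+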
Once we're guaranteed that we now have this one NFT, it's safe for us to make the royalty payment, because this is all atomic: the whole thing succeeds or fails together. You can't time it or intercept this transaction and say, oh, I got my NFT, I'm going to kill this transaction and not make the royalty payment. The whole thing succeeds or fails together, and so at this point we have our NFT in this transaction and it's safe for us to make the royalty payment. We're going to make that royalty payment to the inflation destination. The selling asset in this case is enforced to be XLM, and what's the amount going to be? Whatever the sale price was, times 10%: so if we're selling for 100, the royalty is going to be 10 XLM. Once that payment is made, we can then carry on with the logic of closing down the trustline flags. The + +[01:40:00] user now holds that NFT, but they are not allowed to do anything with it. So we're back where we started: some cool arbitrary logic has happened — in our case, a royalty payment has been enforced — but now we've locked down the asset. Nothing else can happen until the user comes back and calls the offer api endpoint again, we offer it for sale again, and then someone else can buy it again, with the inflation destination being the thing the royalty payment always goes back to: the original minter. And so at this point, when we come back into our api offer again, they get that XDR back and the user will sign it. The user needs to sign it because they're making a payment, and we need their permission to make that payment, so that user will sign with their key pair. If you're using something like Albedo or one of those delegated signing services, like MetaMask, this is where that modal might pop up and + +[01:41:00] they confirm that they actually want to sell this. I'm putting those secret seeds in local storage just to make things a little bit easier for the user, but if you want to have confirm logic, kind of like that web3 stuff, you can use those delegated signing services where that secret key is managed by the user. All right, so at this point we have really all of the logic, through just those two functions and that original minting step where we assign the auth signer to the new issuing account. There's not a lot of logic here; it's actually quite simple. You'll probably want to do more interesting things in your project, but a lot of times — and this is actually true in most smart contract programming — there really isn't a whole lot of fancy stuff going on. There might be some complex math for some of the bigger DeFi projects, but a lot of this isn't very complex. You've got + +[01:42:00] a client service, a user interface, and some really basic logic. The thing that's really innovative is when you add decentralization, when you have multiple machines running this logic, so that you can't just shut it down. Now, anytime this auth server is called, if it's being run by five different machines and one decides 'I don't want to run this anymore,' the four others can continue to run it. Or, in the case of a decentralized, on-chain smart contract, there are thousands of machines running this and you can't shut it down. The account is actually controlled by the machine, not by a single third party.
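+
+The royalty op that follows might look like this sketch, reading the recipient from the issuing account's inflation destination. The 10% rate is an assumption matching the 100 → 10 XLM example above:
+
+```js
+const StellarSdk = require('stellar-sdk');
+
+// Assumed inputs: the inflation destination read off the issuing account's
+// Horizon record, the buyer's account, and the sale price as a string.
+function buildRoyaltyOp(inflationDestination, userAccountId, price) {
+  return StellarSdk.Operation.payment({
+    destination: inflationDestination,        // the original minter
+    asset: StellarSdk.Asset.native(),         // royalties enforced in XLM
+    amount: (Number(price) * 0.1).toFixed(7), // 10%: a 100 XLM sale pays 10
+    source: userAccountId,
+  });
+}
+```
+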
So it's not that the functionality, the contract logic, is that complex; it's that it's decentralized. There are lots of people okaying this, putting their signature stamp on it, saying: yes, we approve of this transfer of this asset. I mean, most of these are less than 100 lines of code, and you can get away with some really cool stuff — in this case, enforced royalty + +[01:43:00] payments for an NFT. It can just exist. It's out there, it's on chain. Any interface that wants to show it and sell it can use the same logic to accomplish that. It's quite fantastic and, again, not that complex. There is one more thing, though, that I would like to cover, and it's true in any sort of DeFi application development: there's — it's not a fight, but sometimes it'll feel like a fight — a tension between the data that's on chain and how you actually build an interface around that data. You've got all this stuff happening on chain, usually in a very efficient manner, and that can sometimes make it really hard to showcase in an interface. In our case, we have a bit of a challenge that I haven't discussed, and that is actually showing NFTs on the interface. When you have this UI, how + +[01:44:00] am I supposed to show just the offers for my NFT project, just the offers that are for royalty payments? Why aren't all of the NFTs on Stellar showing up in this interface? How have we gotten it to only show the ones that are involved in my project? Well, if we look at the app view again, there's a method here called update offers, and this is going to — you guessed it — update offers, that array of different NFTs. This is an actual Horizon api call that's getting offers from the Stellar blockchain. If we go to the Stellar Laboratory really quickly, on testnet, and explore the endpoints here, we can actually look at the api endpoint it's calling: this one right here, all offers. Now, obviously, all offers is not what we want, because that's going to be literally all of the offers on Stellar, not just the ones + +[01:45:00] involved in my NFT project. So how am I going to filter all of the offers on Stellar to show only the ones involving my project? You might think you could filter by some sort of NFT code or something, and that's actually a reasonable assumption; it's not a bad one. You could maybe use an NFT code or some special code. The problem is that now you're open to an attack vector: if you said any asset that uses the code NFT is going to be part of this project, or you used your name plus NFT, then anybody else who wants to insert their NFT into your project just needs to name their NFT after the naming convention you've used. So you need to use something that's official, that's secure, that only your project can do, as a filter mechanic, so that nobody else can insert themselves and say, + +[01:46:00] 'I'm going to issue an NFT that's actually a scam' and get their way into your project. This is a real, legitimate concern.
When you're building projects in the open on a decentralized network, it becomes kind of tricky, because everybody knows what your project looks like, everybody knows what your asset codes are, and so you have to use cryptography. You need to use something that's signed, something that only you have permission to do, to be able to build out filter mechanics for your UI. In our case, what we're using is sponsorship. Sponsorship is something that exists on Stellar where a sponsor covers whatever the reserve is to put something on chain: you have to stake a certain amount of lumens to hold things on chain, so an account is one lumen, and trust lines and offers are half a lumen each, to actually have them exist on chain on Stellar. A nice mechanic to offload that from your users is to use sponsorship, where your service account can + +[01:47:00] say: hey, I'll cover that for you, I'll sponsor those things for your user account, so your user is insulated from those reserves. But another nice feature of this is that sponsorship can only happen if it's signed for. You can't just say, 'hey, you sponsor my account, please': the sponsorship has to be signed for by both sides. So we can actually use this functionality to sponsor the offers for these NFTs. Now they're signed for, so when we go to Horizon we can say: I would like to get all the offers, but only the offers that are sponsored by a specific account. And that specific account is something the auth server controls, and so that auth server controls the sponsorship for each of these offers. Now, when we filter, we only get the offers that are sponsored by our account. So if we go back to our code here, inside + +[01:48:00] of our offer logic, that's what these begin and end sponsoring future reserves operations are: we're sponsoring the user account with our sponsor account, and this is coming from an environment variable, very similar to the actual signing secret, the signing key that was controlling the NFT. This is a sponsor signer that's going to sponsor the offers for these NFTs. By wrapping this manage sell offer inside of this open and close sponsorship, we now have a secure mechanic by which we can filter our front end to only show offers that have been sponsored by this particular sponsoring account. And you're going to find there are a lot of these little tips and tricks that can really help when building out your front ends. But you're going to find that, + +[01:49:00] if you're not careful, building in public like this, building on a decentralized chain like this, there are a lot of little gotchas. This is really true on fully on-chain platforms: there are just so many little hacks that people can use when it's not a centralized authority where you have complete control over everything. You're going to have a lot of these little foot guns. But by doing things like this, where you're sponsoring offers and keeping these little nuggets of control, you can have exactly the user interface you want. So there are a couple of those; sponsorship is a really good one. You'll find a couple of others if you go through the drop docs on my smart NFT site, kalepail com: go through the drop docs and you'll find a few more.
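+
+A sketch of both halves of that trick; the sponsor-key environment variable, function names, and URL are assumptions:
+
+```js
+const StellarSdk = require('stellar-sdk');
+
+// Assumption: the sponsor account's public key, from an env variable
+const SPONSOR_PUBLIC_KEY = process.env.SPONSOR_PUBLIC_KEY;
+
+// Server side: wrap the offer in a sponsorship sandwich, so the sponsor
+// account signs for (and pays the reserve on) the user's offer entry.
+function sponsorOps(userAccountId, sellOp) {
+  return [
+    StellarSdk.Operation.beginSponsoringFutureReserves({
+      sponsoredId: userAccountId,
+      source: SPONSOR_PUBLIC_KEY,
+    }),
+    sellOp, // the manageSellOffer from the earlier sketch
+    StellarSdk.Operation.endSponsoringFutureReserves({
+      source: userAccountId,
+    }),
+  ];
+}
+
+// Client side: ask Horizon only for offers sponsored by our account, a
+// filter nobody can forge their way into without the sponsor's signature.
+async function fetchProjectOffers() {
+  const res = await fetch(
+    `https://horizon-testnet.stellar.org/offers?sponsor=${SPONSOR_PUBLIC_KEY}`
+  );
+  return (await res.json())._embedded.records;
+}
+```
+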
That will really help as you build out and find yourself fighting a little bit between server and client. Things might work great, but if you're not careful, somebody else could say: ha, + +[01:50:00] I've found a vulnerability, I've found a way to insert my NFTs into your project, if it's not fully signed for and secured. So that's just a nice little feature here; you'll find a few more, but that's a really nice one. With that — there are other pieces here, but they're all commented pretty well. Again, the application's not super complex: you've got a client and a server, and there are instructions for how to run those. I'm going to move now back to the slide deck. This is a QR code for our dev Discord. We're actually moving into the hackathon part now, so hopefully you'll be able to build some innovative NFTs and actually get rolling on some of these interesting and innovative projects. As promised, there is a GitHub repo. If you go to NFT stellar buzz, there's a link down at the + +[01:51:00] bottom where you can grab the repo, clone it, and begin poking around. Get comfortable with the code, and feel free to use it as the base for your hackathon projects if you want to. Again, those parts where you're opening and closing authorization are where you may choose to insert your own arbitrary, interesting, innovative logic. With that, we will move to any Q&A that folks have around anything we've discussed today. + +[01:52:00] The question: is there any mechanic natively on chain that allows you to ensure that you can't issue any more of an NFT, without locking the account? So, by locking the account, you essentially remove signers from the issuing account such that you can't do anything else with it. If you've said to the world, hey, it's a one-of-one NFT, but you've left yourself as a signer, you could actually go and issue more NFTs in the future. I don't think so, right now, on Stellar, without actually locking the account. There are some things you can do with signers that would disable you from submitting specific operations, like payment operations where you would send more NFTs — you could lock yourself out of those specific operations through the signing thresholds. But yeah, until we get on-chain smart contracts, or you use Turrets and give control of the account over to the Turrets network, I + +[01:53:00] don't think so. The question was: can the inflation destination be an array of strings for multiple accounts? It cannot. But you don't have to use the inflation destination to store your royalty payment addresses. If you remember, back in the manage data operation: manage data allows you to assign arbitrary key-value pairs to an account, and we saw we stored the IPFS hash for the NFT's metadata as a key-value pair. You've got up to 1,000 key-value pairs per account, so you could store your + +[01:54:00] royalty payment addresses inside of the manage data entries for that account: just put a bunch of your royalty payment addresses inside manage data operations instead of using the inflation destination.
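+
+For example, a sketch of storing multiple royalty recipients as data entries; the key naming scheme here is made up:
+
+```js
+const StellarSdk = require('stellar-sdk');
+
+// Assumed inputs: an array of Stellar public keys and the issuing account.
+// Each data entry value holds up to 64 bytes, enough for a 56-character
+// public key.
+function buildRoyaltyEntries(royaltyAddresses, issuerAccountId) {
+  return royaltyAddresses.map((address, i) =>
+    StellarSdk.Operation.manageData({
+      name: `royalty_${i}`, // e.g. royalty_0, royalty_1, ...
+      value: address,
+      source: issuerAccountId,
+    })
+  );
+}
+```
+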
Yes — the question was: can you submit a pre-authorized transaction? So now we're getting into the weeds. There are different types of signers that you can add to a Stellar account. The most obvious one is just other Stellar accounts, but you can also add preimages for SHA-256 hashes, and you can add transactions, actual transaction hashes. Whenever you build a transaction XDR, a SHA-256 hash is generated, and you can store that hash inside the signer array of your account, + +[01:55:00] sort of saying: I give my account authorization that if this transaction ever shows up, I give my permission to sign off on it. So what you could do in this particular scenario is build a transaction that re-enables your account, that re-adds master signers, that reconfigures the signing thresholds of your account at some point in the future. That would put your account back into a state where it could issue more NFTs, which could be kind of an interesting scenario, because you could actually flip the script on that a little bit and say: I'm not going to lock my account now, but I'm creating a pre-authorized transaction to guarantee that at this point in the future I will lock my account. So, give me six months to figure this out, and at that point I'm guaranteeing to lock my account with this pre-authorized transaction that I've pushed to the public. Everybody can see it, and six months in the future we will be able to submit this transaction and it will lock the account. Or you can do the inverse of that and unlock your account with a pre-authorized transaction. It's a pretty cool feature, but definitely an advanced one. + +[01:56:00] Good question. Any audience questions? Feel free, if you have questions for projects you're working on; we're moving into our Q&A and building session. There are no dumb questions, just, you know, people that didn't ask questions when they should have. Oh, + +[01:57:00] let me read that one offline; that one sounds interesting. We'll go ahead and wrap up. We have a happy hour coming up, and the hackathon has already started, so get hacking, keep building. Make sure you join our developer Discord if you haven't. We'll be around all weekend to answer questions; I'll be milling about for quite a while. Excited to see what you guys are building. Thanks so much for giving me your time. Super bullish, excited about NFTs. Hope you are as well. Thank you so much. Applause + +
diff --git a/meetings/2022-03-15.mdx b/meetings/2022-03-15.mdx new file mode 100644 index 0000000000..446e5d7aa8 --- /dev/null +++ b/meetings/2022-03-15.mdx @@ -0,0 +1,167 @@ +--- +title: "From Bootcamp to the Stellar Ecosystem" +description: "A panel discussion with African fintech founders reflecting on their Stellar Blockchain Bootcamp experience, product growth, and lessons learned building real-world applications on Stellar." +authors: [anke-liu, olufunto-boroffice, opeyemi-woyemi] +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session brings together founders who participated in the Stellar Blockchain Bootcamp to reflect on how their products evolved after building on Stellar. Panelists share how design sprints, hands-on mentorship, and ecosystem support helped them validate ideas, ship prototypes, and unlock new growth opportunities. + +The conversation highlights practical use cases across payments, on- and off-ramps, creator monetization, SME financing, and offline access to digital dollars. Speakers also discuss fundraising outcomes, regulatory challenges, and why Stellar’s low-cost, fast, and interoperable infrastructure continues to support their next phase of growth. + +### Key Topics + +- Bootcamp-driven product pivots and rapid prototyping using design sprints +- Real-world Stellar use cases: USDC wallets, offline payments, creator payouts, and invoice financing +- Growth milestones following the bootcamp, including seed funding and grants +- The role of anchors, AMMs, and interoperability in scaling fintech products +- Value of mentorship, founder networks, and post-bootcamp ecosystem support +- Regulatory considerations when building blockchain-based financial services in Africa +- Advice for future bootcamp participants, including early-stage and non-technical founders + +### Resources + +- [Stellar Community Fund](https://communityfund.stellar.org) + +
+ Video Transcript + +[00:00] Okay, so we're live streaming it right now. All right, we are live. So welcome to today's panel. My name is Anke Liu. I'm a program manager at the Stellar Development Foundation, or SDF for short. Just quickly about SDF: SDF is a non-profit organization founded in 2014 to support the development and growth of the open-source Stellar network. And so, before we begin, a little context. Stellar was founded in 2014, after Bitcoin, before Ethereum, and handles millions of transactions each day, submitted by businesses, banks, governments and developers all over the world to build innovative financial products and services. Stellar uses an energy-efficient consensus mechanism and is specifically designed for remittances and payments, so transactions are processed within seconds and cost way less than a penny. So what can you actually do with Stellar? + +[01:00] On Stellar, you can issue your own assets as tokens on the network — think stablecoins such as USDC, cryptocurrencies, real estate and NFTs, practically anything. You can also trade these tokens peer to peer through a built-in decentralized exchange or automated market maker functionality, and you can transform currency as you send it, which is a powerful feature to streamline cross-border payments. But let's get back to why you are all here today. We launched a blockchain bootcamp in partnership with DFS Lab to support the boom of blockchain and crypto activities across the African continent. This was in fall 2021, and each company spent a week at our virtual bootcamp going through the design sprint process with the guidance of a mentor and access to technical support from Stellar developers. At the end of the week, everyone came together for a virtual demo day, with select + +[02:00] investors invited to join the audience. In total, nine companies were selected and went through the bootcamp, and we were definitely not disappointed with the results. They participated, they built a proof of concept, they demoed at the demo day, and each earned an award valued at either five or fifteen thousand dollars, depending on the depth of their solution. So I'm very excited for today, because we have five of those nine companies joining me to discuss their growth, and also some tips and tricks for future bootcamp participants. So, without further ado, I wanted to start some introductions, starting with Ope. Hi. + +[03:00] Thanks, Anke, for introducing me, and hello everyone. My name is Ope, and I'm CEO and co-founder of Bonafide. Bonafide stemmed out of another product called Money Me, and I would give most of that credit to the SDF bootcamp. Nice to meet you all. Awesome, well, that's great to hear. So next up is Ben from Stax. Hey everyone, I'm Ben, I'm the CEO of Stax, and Stax is a universal payment app for Africa. What we've done is given you one app where you can link all of your bank accounts and mobile wallets to a single interface where you can then transact without a data connection. That's really our big unique selling point: it works offline.
And then what we're doing with Stellar is adding support for crypto, where we'll have a self-custody, multi-token kind of strategy. So think MetaMask, + +[04:00] but built for Africa, alongside your fiat and integrated with your fiat accounts. I'll stop there. Awesome, thank you for introducing yourself. Then Fonbnk: Christian and Michael. Hey everyone, I'm Chris Duffus, I'm the founder of Fonbnk, and we offer a unique on-ramp where we bridge the less than one percent of the world's population with the nearly 100 percent of the world's population that has access to a mobile phone. We really enjoyed our experience at the DFS Lab bootcamp, and the Stellar protocol, we think, has great applicability to reducing friction in our marketplace. Awesome, well, thank you for introducing yourselves. Hey everyone, I'm super excited to be here. I'm David Nandwa, founder and CEO of Honeycoin. I + +[05:00] always say I founded Honeycoin as sort of what you'd get if Coinbase and Patreon had a baby, made in Africa. So yeah, we're a standardized monetization platform for both creators and consumers, and basically we exist to make payments for goods and services to creators, and the transfer of value across ecosystems, easy and frictionless. Thank you for introducing yourself and your company. Quidroo — Funto, please introduce yourself. Sure, thanks, Anke. Quidroo is an online factoring platform — actually, I should first of all introduce myself: Olufunto Boroffice, co-founder of Quidroo, which is an online factoring platform where transactions are streamlined and SMEs on the African continent can get paid within 72 hours by selling their invoices to get fast cash at favorable rates. We had a great time at the bootcamp, and where we're seeing the use + +[06:00] of Stellar for our product is the fact that we're going to be looking for investors to invest in buying some of these invoices, and we're going to be looking at using Stellar for cross-border payments as we look at transforming currency peer to peer. So thank you for having me, I'm excited to be here. Awesome, yeah, we're excited to have you as well. Last but not least, at DFS Lab: Joseph. Yeah, hi everyone, Joseph from DFS Lab here. DFS Lab is an early-stage venture capital firm where we invest in digital commerce companies on the continent, very early, pre-seed and seed stage, and we also work with institutions like the Stellar Development Foundation to onboard companies to play around with innovative technologies like Stellar. + +[07:00] Yeah, excited to be here, thank you. Yeah, we're so excited to have you, and thank you to all the participants for introducing yourselves and your companies. Even just re-hearing what you're building is really great, and the way you are growing and really innovating in the space is magnificent. I know all of that, but the participants don't know yet. So right now I'm going to stop sharing my screen and go into the discussion. For all of you watching in the Zoom, you can also ask questions, and I will see if there's some time to answer them. But without further ado, let's head into the discussion. I will start with Stax, with Ben: can you quickly talk us through the solution that you prototyped during the previous bootcamp? Yeah, so in + +[08:00] the last bootcamp, we came in really with just the
question of how do we get our toe into crypto. We knew that we wanted to do something with USDC, and what we ended up building in the bootcamp was a prototype showing the ability to create a self-custody USDC wallet and to send money over USDC in Stax without using a mobile data connection. What's kind of novel there is that our core technology has this ability to automate USSD. Just for our friends in the West who might not know what that is: it's this legacy GSM channel, ubiquitous across Africa and Asia, that basically uses part of the voice channel to transmit data. What we've done is use that technology in an app to make services work offline, even though you're actually having a synchronous connection with the server. We've done that with over 100 bank and mobile money + +[09:00] services today, in 10 African markets, but we wanted to start offering crypto services as well and give our users basically a US dollar account, and that's where Stellar and USDC came in. What we demonstrated at the end of the bootcamp was: you turned off mobile data on your phone, opened Stax, created a USDC wallet and sent USDC to another USDC address, all with mobile data entirely disabled. Wow, that's really impressive. I think this is one of the key issues: being able to provide offline services is really key, especially in regions where infrastructure is lacking. So can you tell us a bit about your plans for launch? And also, I saw that you recently announced that you've raised 2 million dollars in seed funding, which is amazing — congratulations. + +[10:00] So how has your company grown, what are your plans for launch, and what does growth look like already? Yeah, so we're getting ready for a big push. Come April, that's when we plan to actually implement the proof of concept we built in the bootcamp. In April we intend to push that live and really start marketing from May, at least in Kenya and a few other markets; we're still figuring out which markets to start with. But from May, you'll see that Stax users pretty much anywhere on the continent will be able to create that USDC account and send and receive money without data. Where we want this to go is really kind of a 'MetaMask for Africa,' built-for-Africa play, and the idea there is self-custody: you have your keys, we don't touch any of that. The goal is that you'll be able to buy, sell, swap and hold all of your tokens. You can take + +[11:00] them from Stax, if you want, to any other web3 service, because you've got your keys, but the goal is that you can do it all in Stax, right alongside your bank accounts and your multiple wallets. And so the big lift for us over time is going to be how we stitch those things together, so that you can have a seamless fiat-to-crypto-to-fiat experience, whatever token you have, whatever fiat accounts you have. So technically, us launching, that's going to be very quick for me, but the hard work is going to be in stitching everything together, you know, based on a Stellar anchor here or there or whatever. So that's really our plan. Great, yes, I think that's really exciting. And also, having raised the 2 million dollars in seed funding, do you want to give a few more details about that and how that
came to be? Yeah, shout out to Nomis Ventures, to World Within Ventures, Launch Africa Ventures, Orange DAO and some others that participated. It was really + +[12:00] useful, actually, coming out of the bootcamp, to show that we were serious about our ambitions to get into crypto and that we were able to get a kind of trust mark, a seal of approval, from the community. That actually really helped us raise money and got us in front of investors we hadn't spoken with before. Everyone here has been fundraising, I'm sure, but when you're pitching traditional VCs, that's one kind of segment or slice of the pie, and when you start pitching crypto, that's a whole other big slice of the pie that wasn't available before. So that's been super helpful to us. Awesome, well, congratulations. And I think another big player here that has raised quite a lot of seed funding is Fonbnk. So Chris and Michael, I would love to bring you to the stage, and I'd like to hear a bit more about the bootcamp, what impact it had, and how you have seen Fonbnk + +[13:00] grow since then. So I might start on that. Michael and I actually met through this bootcamp: he was our mentor and provided some very sage guidance for us, and obviously it led to us being one of the finalists. I think in many respects it led us to drive and hone our product-market focus a lot more, which catalyzed into a 3.5 million dollar seed financing from leading crypto and venture investors. So I thought the camp was really great for us, and in addition to the proceeds from the camp, we also received another Stellar grant, as we plan to incorporate the protocol + +[14:00] into some programs that we'll be launching as soon as next week. I'm very excited for that. Yes, I heard about it, the marketing grant from SDF — yeah, that's great news. I'm very excited that we were able to support in that way. And going back to what you said about Michael coming on board: so, Michael, you were the facilitator of the Fonbnk team, right? To backtrack real quick: every team in the bootcamp has a specific facilitator assigned, and in this case Michael was the facilitator for Fonbnk. So Michael, what was that like, first being a facilitator of the team in the bootcamp and now co-founder and head of growth Africa at Fonbnk? Yeah, thank you very much. I was actually seated at the same spot I'm sitting in now when + +[15:00] Chris and I met, and in hindsight it was a bit of a surprise, but also a lesson for me on the role of ecosystem-building programs like the bootcamp. I think a lot of us might think about it in terms of coming to raise funding or accelerating some of the products we are working on, but for me, the opportunity I got was matchmaking, finding other ways to benefit from this network and cohort of great companies and great founders. I think that was the biggest lesson that I took away: the kind of value you can derive from these programs beyond just the old-fashioned, tried-and-tested funding. It can also be a place where you could meet potential partners, it could be a place where + +[16:00] you could meet co-founders, like myself, you know? So who knows. Just look at it as a ball of value that you can
exploit depending on whatever needs you have. Yeah, it's amazing. I love that you bring up matchmaking, the bootcamp as a matchmaking event. When we first started, for me, going in, it was like, okay, we are doing this, and then coming out, it's all of these connections that you really build. I was also thinking, oh, it's going to be a virtual event, I'm not quite sure what that's going to be like, but the connections that we made, and the lasting relationships, have really helped me too. And a last question for either of you to answer, because in a prep session we also talked + +[17:00] a bit about your business model: what is the business model that has allowed for such growth in the company? So we are, at our core, an exchange, where we match individuals and businesses that might trade in prepaid airtime with individuals seeking digital money. I think that's sort of the inflection point where our business has taken off: people are trying to go from the fiat world into crypto, web3 and all, and even traditional finance, but on the lowest-friction sort of path, and I think partnering with Stellar is a great way to remove that friction from the process. I think + +[18:00] I've mentioned that a couple of times, because that's something that's key to a better user experience. And actually, in my journey through product discovery in the Stellar ecosystem, I was very pleasantly surprised at the progress of many of the developments, whether it's the AMM or the other various Stellar products, and in large part because it is a low-cost, environmentally friendly ecosystem. I think it touches all the major items, and there's been significant improvement, so we're excited to be partners. I thought I was muted there. Awesome, well, that's great to hear. We are also excited to be in connection with all of you, and SDF is really here to support those who are building on Stellar. Stellar is open source, by the way; anyone can choose to build + +[19:00] on the network, but we as a non-profit really try to see how we can support people that are building real-world solutions on the network. So it's very great to hear this as well. And then I wanted to go into Bonafide, so a few questions for Ope, actually: what was your experience with the bootcamp? Thank you, Anke. So, like I mentioned earlier, when we came into the bootcamp we had a different product, right? That was a digital bank for migrants, and we had already started experiencing some challenges: the product was a great product, but did it really communicate or fulfill our mission, which is basically to democratize opportunities in general, right? And the bootcamp helped us to really walk through a design thinking process to actually answer some of + +[20:00] these questions and come up with something that's really simple, basic, and works for everyone. The result of that is Bonafide, a digital wallet that lets anyone save, send and receive in digital dollars, and this is available to all Africans, the one billion Africans who are currently not connected to the global world of commerce or trade. And that's massive
by itself. Also, something we're very excited about that happened within the bootcamp was being able to fashion out this no-loss prize that allows people to actually win big amounts of money without the risk of losing any money. I mean, that's super fantastic, and it's something new that we'll bring to the + +[21:00] continent. So the bootcamp really helped us pull all of that together and come up with this simple product. We're still pre-launch, but we've already been transacting millions of dollars on a monthly basis — 2 million last month, based on what we've done so far — and soon we'll be going out big time into the market. Oh wow, that's a lot. Congratulations on that type of volume already, before launching; that's amazing. Yeah, because I remember we were trying to figure out the use case, and obviously there are some restrictions on what we can do, and you worked out a way that really is a great business model. So yeah, I'm very excited about that, and being able to use blockchain, use Stellar, to allow such features is just something you cannot do with any other technology, really. So, checking back a little bit: you have + +[22:00] been part of Money Me, right? And Money Me has been in the Stellar ecosystem for quite some time. Can you explain a little bit about what Money Me is, and did the bootcamp experience change anything in your engagement with Stellar? Yes, so Money Me, like I mentioned earlier, was the digital bank focused on migrants, right? And we built that product within the very strict context of U.S. regulatory infrastructure: a couple of things, you know, controlled by the banks, operating as agents of the banks, U.S.
persons only, not fully democratized. And we were trying to do something that was a bit more democratized, and that was what Bonafide was. So how did the Stellar ecosystem help, beyond just the bootcamp itself? I think the relationships: we got very good referrals to others in the ecosystem that we could work with, right? + +[23:00] The folks at Wyre, for example — those were all relationships that were brokered by the Stellar organization, so that was really helpful too. Great, yeah, that's really great to know, and it's also about being able to grow as the relationship develops. I think that's also something I want to point out to future, or current, ecosystem companies building on Stellar: you can always develop a relationship, through this bootcamp but also through other avenues, and we definitely encourage that. So thank you so much, Ope; that was a really great outline, and I will come back to it later. Now I wanted to hear from you, Funto, about Quidroo. Quidroo was at a kind of + +[24:00] early development stage when you applied to the bootcamp. What made you decide to apply? Thank you, Anke. I think for us, one of the reasons why we decided to apply for the bootcamp was because we were, like you mentioned, really early stage. We wanted to use the bootcamp to make sure that we were going down the right path, and the bootcamp definitely exceeded our expectations in making sure — I mean, we mentioned the design sprint — that the idea we had was something that was not only legitimate, but that would work. I think some of the other things that came out of the bootcamp were this additional idea of being able to get investors, because of the way the equity works, you know: it's the SMEs, vendors, and then investors. The bootcamp helped us to highlight, or crystallize, the fact that we can + +[25:00] then really go cross-border on payments and tap investments, no matter how small, from investors across the world, right? So that was one of the things that came out of the bootcamp. And we're so excited that not only did we finish the bootcamp and get five thousand dollars, which we're excited about, but we were then able to transition into the developer community, and we were able to get additional funding that has allowed us to hire additional engineers, and we have some funding for marketing. So we're super excited, and even though we're still pre-launch, I think over the next few months everybody's going to be hearing about Quidroo. So excited, super excited. Oh, I'm excited to hear that, also about the SCF, and I'm coming back to that in a later question. But quickly, about the bootcamp experience: companies in the bootcamp came from very different levels of development, and + +[26:00] I wanted to go through what your experience was, what you liked about it, and what impact it had on your business, in a bit more detail. Sure. So I'll be the first to say that I'm a female non-tech founder, and so even coming into the bootcamp there was a bit of trepidation, because I was like, oh my god, what am I getting myself into? But I will say that not only was I able to keep up
But I will say that not only was I able to keep up with the design sprint and everything that was going on; there was also a sense, at least for me, that there's a big network out there willing to support you. Even you, Anke, just the encouragement along the way. Our mentor was Joseph Benson, and just the encouragement to the team:

[27:00] there are no words to describe it. It also gave us another level of legitimacy in terms of what we were trying to do. So for me it was the network: seeing that there are other people trying to do the same thing, and knowing there's a network of people cheering us on who want us to really succeed, was just phenomenal. Well, I'm super happy to hear that. I'm 100% behind all of you, and I'm excited that you've been able to grow and that you want to continue your journey with Stellar, so thank you so much, Funto. And big claps to all the women founders here: it's Women's History Month this March, so big claps. Honestly, I'm excited to have each of you on board, but in particular because it's Women's History Month. Big claps to all women founders. So, going a bit into that (it's a bit of a surprise question): did that change anything,

[28:00] being a woman founder? And what would you say to encourage other women founders in the space, especially in blockchain, to get started? I would say: do it afraid, but do it. I don't think you're ever going to get 100% comfortable, so you just need to do it. One of the things that Ope mentioned the bootcamp helped him do was pivot a bit, and after you're done with the design sprint you start to question: okay, will this work, won't this work? The bootcamp was a safe space to explore those ideas. So especially for female founders (I can't say it enough, we don't get as much support as I would like to see), this is a really safe space, a place where you get a lot of support.

[29:00] So I really want to encourage female founders who are maybe on the fence to apply; they will get the best support, and I can attest to that personally. Thank you so much for mentioning that. We cannot stress it enough: if you are new to blockchain, or you're on the fence, it's very important just to apply, just to get started, just to get in there, and who knows what will happen. There's a lot of excitement in the space, a lot of opportunity. So before we get into recommendations, I wanted to highlight a very special project that I mentored. I'm excited about all of you, by the way, but Honeycomb in particular: I was a facilitator and part of their bootcamp process, so I'm very excited. David and Dominic, what was participating in the

[30:00] bootcamp like? Participating in the bootcamp was grueling. I like to tell David that by the third day of the bootcamp he was exhausted; I remember he didn't sleep at all the night before demo day.
But it was so much fun. We were truly stretched, but we were able to lay the groundwork for something we pretty much worked on for months afterwards, and we're really grateful, because the bootcamp allowed us to explore the question of how we help our creators and their fans send and receive crypto. We walked out of the bootcamp wanting to build a whole NFT marketplace, and we've been able to do that, at least as a proof of concept. In the bootcamp we were able to implement a fast checkout flow

[31:00] for our creators to receive XLM in a way that would be pretty easy for non-crypto natives who want to support their creators. That was so much fun; it really stretched us, and we went ahead immediately afterwards to keep building on it, and it's been really good since. And just to add: I think one of the best things about the bootcamp is that it forced us to have a sense of urgency, and I think everyone on this panel can vouch for that. There's a certain lethargy or latency that comes with a startup: you have your roadmap in Notion, you queue something up for three months down the line, and it feels like it never comes. You always work with some urgency, but it's never really "this is next week that we need to do a specific thing."

[32:00] The bootcamp put us in the frame of mind of thinking: if I literally had a week to map out a sprint and get to a point where we actually have an MVP, or something we need to ship, what would that look like? And it helped instill that culture in the company itself: as we expand the team, we carry that three-day-sprint culture across everything we do. The growth traction has been enormous up to this point. Pre-bootcamp we were doing maybe a thousand dollars a day on the high side; now we're processing up to a hundred thousand dollars in a single day, and that's exciting to us. Yeah, and the validation we were able to achieve in the bootcamp, and carrying that forward: we built an end-to-end platform, we launched our exchange, and we were one of the first platforms to launch a fractional NFT protocol, which we also brought over to

[33:00] Stellar, which is super exciting. So much came out of it, and also just being part of the Stellar ecosystem, which is constantly growing and super supportive on every front, especially SDF. And having you as a mentor was pretty great too. To anyone watching: I'm biased, but if you're jumping in, Anke should be your mentor. Well, that's great to hear, but we had great mentors on board too, right? Everyone here, feel free to chip in on how grueling (well, grueling but good) an experience it was to have this busy, packed three-day bootcamp, actually get clear results at the end at demo day, and work so closely with a facilitator.

[34:00] I would love to hear if anyone wants to chip in on what David and Dominic just said. Who else had a grueling experience? I believe it was Fonbnk; Chris, you joined from the U.S.? Yeah, I was in the U.S.
So it was a very long week, getting up early East African time to participate. And just to show the commitment behind the program: I think someone called me and woke me up the first morning to make sure I showed up on time. I'm actually forever grateful for that, because it meant we were able to be fully engaged, and it showed the sincerity of the engagement in the program. But maybe get some sleep the night before, and plan to go to

[35:00] bed early if you're going to join from another part of the world. And just to add: I think we still have that problem with Chris now, because he's on DC time, so I usually have my working hours go all the way to 7:00 p.m. East Africa time. But to add to David's point about it being grueling: it's also about the kind of vibe you get with your mentor-facilitator. Even at Fonbnk, I felt Chris was a really mature founder (I think he's mentioned he exited a couple of companies), so when I met him I had an easy time working with him, and we built up a great relationship together. So it doesn't have to be all grueling; it's also about finding the right cadence between the mentor and the founder they're assigned to. Yeah, and Michael was pretty great to work with, and I

[36:00] think, looking at some of the other relationships, while I don't know if other mentors joined their companies the way Michael did, they clearly spent time on the matching, and all the mentors were very thoughtful in how they guided us through the process. That's great to know. I was also quite surprised by the level of engagement the facilitators had. Especially the first two days, we were in Zoom, we were in Discord, we had the slides, we had the Figma. And to the folks watching: obviously this was all virtual, and hopefully we can do some in-person bootcamps sometime so

[37:00] I can meet you all in person. So, what surprised you about this virtual model, and what was your experience with that format? It's an open question, and then I'll get back into some more specific questions. Ben, feel free to go ahead. The follow-through is what surprised me the most. Even this event is another convening of all the participants, and we've had various conversations since; Anke, I know you've reached out multiple times. I've been part of different bootcamps over the years (I actually worked at DFS Lab for a couple of years), and the follow-through from Stellar and DFS Lab has been really fantastic. It's clear this wasn't just a transaction, let's do an event and get some press around it. There were actually real relationships formed and invested in, and as a participant

[38:00] it's clear to me that's the intent of the organizers, so I really appreciate that. I do feel like there's an emerging community here; I don't feel like an outsider to the ecosystem. So kudos to all the organizers for the follow-through and for really investing in those relationships.
And kudos to you for continuing your relationship with Stellar. It's definitely not without its challenges, right? This space is a new space, a growing space. As someone mentioned earlier, there's constantly new technology coming out, and people adapt to it, implement it, and grow the network. We are growing, and there are growing pains that come with that, not only on the technology side but also from the regulatory standpoint and in user experience. We're at the forefront of innovation.

[39:00] So I wanted to hear a bit more about Ope's experience here, because we've talked a lot about how everyone's had a great time and grown so much, but I think it's also very important to highlight some of the challenges. So, Ope: what were some of the challenges in getting Bonafide to where it is right now? Thank you, Anke. And I love the fact that you're trying to point out the good and the bad; thankfully there was no ugly. I think one of the biggest challenges, especially for folks developing in blockchain, or cryptocurrency specifically, is navigating the regulatory space. Number one, it's not really clear: if you're following the news, if you're looking at what other people are doing, you tend to think that makes it

[40:00] right, but it might not necessarily be right. Also, in terms of innovation, some countries are, shall I say, a bit open about what's happening in cryptocurrency and innovation, and some are really quite closed. So: what's the right domicile? What does that look like for your partners? Do they like it, do they love it? Do they want you to be a Delaware company, but with a crypto license of some kind, or a money transmitter license in some country? Do you need a license in the U.S. that allows you to be a merchant of record, even if you're not really operating in the U.S.?
There are a lot of things we had to figure out the hard way, and while this is all still evolving, one of the things I said

[41:00] after the last bootcamp, and I just want to repeat it here for everybody's benefit, for posterity's sake, is that it would be helpful if a foundation like Stellar, or a consortium of foundations, published information out there. Not legal information, necessarily; you should still consult your own lawyer about what works. But, as far as we know it, across the world, on a country-by-country basis: what are the basic things you need from a compliance point of view? Do we have to do X, for example, to run a pilot, or to move money, or just to participate in Web3? Those things would really help foster innovation, because the reality is that folks are confused. Nobody wants to go to jail for trying to change the world, right? We all want to do good, and we need all the help we can get. Yes, I totally agree on that front. It can be difficult to navigate, especially

[42:00] since every country, every government, is right now figuring out what regulations look like around this new paradigm of internet applications. As a foundation we're definitely at the forefront of policy, but there's always room for improvement, so these are all great recommendations and we should look into them as we grow as a team. And it's not only on the foundations: as a community as a whole, it's about being part of it, keeping each other in the know, and sharing what's going on. That's really important so we can all grow as a community. Does anyone want to add to that? It's a very important, interesting aspect, and not only regulatory challenges but others too. Before we go into recommendations for bootcamp participants, I would love to

[43:00] reflect on that. Hi Anke, I just wanted to say I think Daniel had his hand up; Daniel is one of my co-founders at Quidroo. But I also wanted to echo exactly what Ope was saying. One of the things we're bumping into as we're building is this whole regulatory ecosystem, especially in the U.S., and I'm too cute to go to jail, so I need to figure out exactly what's going on. I found myself going down a rabbit hole reading about FinCEN and a lot of other information, so I'm going to be hitting Ope up afterwards to say: okay, please, what did you do? Maybe you can clear the way. But I know Daniel also wanted to mention a few things from his experience, so Daniel, if you're there, please go ahead. Thank you. Hi Anke, hi everyone. This relates to a question I was asked earlier, and I think somebody else also mentioned it; I just wanted to share my experience. I

[44:00] think the major thing I experienced in the bootcamp was the personalized mentorship. I think Mr. Joe really did good work, because I'm sure some of us, during the engagements, sometimes felt like drifting off: okay, let me just be in the meeting without really being in the meeting. But Mr. Joe would not even give you the opportunity; he's always engaging,
and at the end of the day you realize that yes, it was really worth it. I was not surprised that everyone who participated actually came out with something, and that was because of that personalized approach. In my opinion, everybody was carried along, no matter what you were building, and everybody got something out of it. It really helped, and I'm sure everybody's doing what they need to do at this point in time. I just wanted to add that, and say shout-out to the SCF team and to DFS Lab. Thank you very much. Thank you so much, Daniel.

[45:00] I didn't realize you had your hand up (I guess I'm in the wrong view in Zoom), but thanks so much for speaking up. I'm very happy that this personalized approach really works, and we rely here on DFS Lab for the design sprint bootcamp, of course. Joseph, do you want to add a few words? Yeah. I think that's one of the things that makes the bootcamps quite special and engaging: the fact that we have these mentors who are really working with you to make sure you're not dropping off along the way, and basically trying to help you find clarity. So you can be a founder who already has a ton of experience building products and has

[46:00] raised money, or you can be Funto, who just started working with engineers and is still trying to figure it out; either way, there's going to be someone who can help you and walk you through things during the bootcamp. I think it's also very important, as everyone has said, that the Stellar team was involved. If you wanted to understand the business case for what you're building, it was pretty easy to talk to someone and say, hey, help me figure this out, and if you were ready to dive straight into the code, the testnet was available and stable enough for you to dive in. So that's the whole package you get with the bootcamp, generally. Yes, definitely, thank you for providing some more detail around that. And something important to note

[47:00] is that the bootcamp is not the end, right? Even though it can feel like the end. It definitely felt like, oh, after these days I'm going to take a break; I actually took a long vacation afterwards, like, man, that was a lot, and I'm sure everyone else felt the same. We had such a good time together and really spent the energy. But the bootcamp is not the end; it's really the beginning of your journey on Stellar. Very soon after (a bit too soon, actually, I think) we had a round of the Stellar Community Fund, which is an open-application grant program to support Stellar-based projects with funding. And it's really from the community: while SDF provides the funds in the SCF, which is the acronym for Stellar Community Fund, the community actually allocates them, so it's a way of building your relationship with the community while also growing. Some

[48:00] of you went through the SCF. I think it was a bit early-stage at the time, and as we continue to grow with the community there might be more
opportunities; there will be more opportunities, and we have a current round open. But one particular project that went through it is Quidroo. Funto, I would love to hear about your experience with the Stellar Community Fund; you were a winner. Yes, thank you so much. First of all, again, I have to say thank you, because you definitely prodded us to make sure we applied. There was still a bit of fatigue from the bootcamp and we were like, yeah, we'll apply, but Anke really followed up and said, you guys need to get on top of this, which we did. And I'm very grateful for that, because that opportunity allowed us to get funding of, I think it was, five thousand dollars, which has helped us,

[49:00] as I mentioned earlier, hire four engineers; we're going to do a bit of a marketing blitz right now, and some other cool things. Honestly, if we hadn't gotten that funding from the community fund, I would be running around like a headless chicken looking for money right now. It has given us enough time to really build, and build the way we want to build, so I'm super excited. You mentioned Discord earlier, which is this interesting online platform where the developers and different Web3 people chime in, and one of the things the process forced me and Daniel to do was hone our product and be able to explain it to anybody who didn't know anything about what we were trying to do. We received a few questions where I was like, huh. Someone would ask a question and I'd go scrambling back to the team:

[50:00] this is a valid point, what do we do, how do we solve this problem? So it was teasing out a lot of issues and questions that we needed to start thinking about, and thinking about really deeply. And the process wasn't difficult, to be honest: we just had to apply and make a case for what we needed the funds for. We didn't ask for too much money, just what we really needed, which I think is also very critical, and we got it. So again, I'm super excited; I can't say enough about this whole process. We're building, I'm so excited, and when I talk to other founders I know, after hearing about that experience, everybody I mentioned it to was like, please, where do I sign up? So I'm expecting that all the founders I know are going to apply for this bootcamp, because again, it's giving us the opportunity to build in a way where we're not rushed; it gives us a bit more runway, I would say. And the process was not difficult, and there were a lot of helpful people chiming in, asking questions, prodding at what we're trying to do,

[51:00] and we got the money, so yay, thank you SCF. Yes! I'm very excited about that. I don't know if Daniel wants to say something? Yeah, we've been in the trenches together, and I will say that that funding has allowed us, like Funto said, to hire more engineers. I think we're 60% of the way there, so again, very soon we're going to have a product, and it would not have been possible without this funding, this intervention, and without knowing that we could always come
back. And the other thing I have to mention quickly, for anybody watching: this is non-dilutive. Nobody asked for anything; it was just, okay, we believe in your product, we believe in your idea, here's money, go build, and let's see what you have. So again, I can't say enough about it. I hope everybody who's listening signs up for the bootcamp, so that

[52:00] you can then be recommended for the SCF. I can't say it enough, so thank you, Anke. Awesome, and thank you, honestly. And to be clear, we can't control the community: while I can always encourage you to sign up, for the SCF it's really the community that votes, so that's real validation. To anyone watching: it's a great experience. But we are coming up on time very soon, and there is one question for Honeycomb that I really wanted to dive into, and then some quick recommendations, before we get into the details of the upcoming bootcamp. Since we did start a little late, if you need to hop off, don't be shy. So, David and Dominic: we haven't delved into your project yet, and your project doesn't only use Stellar; it uses multiple chains. I would love for you to give a bit of detail about that. What does that multi-chain future look like, and how does Stellar fit in? Yeah, thanks for

[53:00] asking that; I'm super happy to jump in there. You're absolutely right: we are strong believers in interoperability and in building bridges, so that there's better interaction across chains, because there are value props that come from one chain that might not be found on another. On an infrastructure level, what we've essentially built is a cross-chain bridge for six different blockchains: that would include Celo, Polygon, Ethereum, Stellar, Bitcoin, and Flow as well. Oh, and Harmony. Why that's important to us: we believe that in order to create true utility, we want to build a multi-chain platform that lets folks benefit from the value props a particular blockchain might provide. And

[54:00] why Stellar is such a key component of that bridging: one, the automated market making functionality on Stellar is a lot more affordable and allows a lot more modifiability than other chains provide. Tying together the affordability of the blockchain and the fact that it's very programmable in comparison to other blockchains allows us to build frameworks and protocols on top of Stellar that we can then leverage in our cross-chain bridges. That can be for swaps, which we already provide (imagine swapping USDC from Stellar to Ethereum), or imagine escrowing a transaction on Polygon and receiving it on a different chain like Stellar. For us, Stellar as a blockchain provides the interoperability and programmable nature that lets us build really cool products around the use cases we're trying to power on top of multiple chains,

[55:00] which is super exciting to us. Yeah, thank you so much for explaining the multi-chain nature of Honeycomb. I think that's also where the future is headed: we're kind of in the early days of the
internet, so to speak, like the 1980s and 1990s, when the internet started to open up. You have all these different things, and we're all growing as an ecosystem, finding interoperability where needed, because the end goal really is to support and answer the needs of your user audience, whichever way is best. So I'm very excited about that, and very excited, David and Dominic, to see your product grow and receive funding. But we are definitely at time now, so before we dive into the details of

[56:00] the upcoming bootcamp, I wanted to ask, for whoever wants to answer: if you were doing it all over again and applying to the bootcamp, what would you recommend new bootcamp participants do? Any advice you want to give? I can chime in first. I would say: keep an open mind. The thing you think you're entering to build might not be the actual thing you end up building, so it's very good to be open-minded. If you stay rigid, it might keep you from the very thing you'd be better off building instead of the one you had in your mind going into the bootcamp. Okay, so I think my advice to

[57:00] other people coming in is also to have an open mind, like I said earlier, about the kind of value you can get from the Stellar blockchain network. It's a network, not just a bootcamp, so keep that perspective on the value of this bootcamp. And some general advice for founders out there: my co-founder Chris mentioned that it's really important to have a product that's relatable, that's easy to onboard, that really abstracts away some of the complexities, like the early internet, as Anke said. And for me, the other component, which is something I found at Fonbnk, is solving the problems in Africa right now,

[58:00] and a lot of those problems are: how can we get more young people earning in the gig economy? How can we relieve them of some of their daily expenses, whether it's airtime or data or whatever else? Whatever you can do to help these young people with their pockets and their daily living is really going to go a long way in addressing those problems, and as someone who's running growth activities for Fonbnk, it makes it easy to sell the product. I'm having the easiest time selling the product, because I'm just telling people: look, this is something that could help you save on your airtime and data bills each month. That's a very strong proposition. So those two points, for me. I was going to say: I would encourage everybody to just apply for the bootcamp.

[59:00] Just apply, because once you get over the hump of the application and you get accepted, it unfolds itself: the amount of support, both technical and just emotional, everything comes into play, and also the opportunities it opens up. Like you said, anybody who's in doubt as to whether this is the wave of the
future or not really needs to get with the program. Being part of this (not necessarily the wild west, but that unfolding), being part of that journey, is very critical. And again, we're all part of a network now of people who are trying to figure this out together, so you're not by yourself, you're not alone. So again, I'll just say: please apply, and opportunities will build one on top of the other.

[01:00:00] You do this, and then another opportunity opens up. Through Anke's suggestion we applied for the Berkeley accelerator program, and hopefully we'll get in, knock on wood, but those are opportunities we would never have thought about if we hadn't applied for the bootcamp. So again, if you're listening: just apply, just start the application, and don't leave it too late; make sure you hit the deadline. Apply. I love that: just apply. And you're not alone in the space. We're all here together, all struggling, all building and furthering innovation, and it's not easy, so we're in it together as a community. The Stellar community is one I'm very closely connected with, and I think it's a really great place to get started as well. One thing I quickly wanted to come back to with Ope:

[01:01:00] what do the opportunities for getting investment in this space look like, and what is your advice for future bootcamp participants? I'll quickly talk about that. Web3 is definitely very hot right now, and for companies building in this space, as long as there's a real job to be done for users, there are investors willing to back you. The challenge for a lot of entrepreneurs is figuring out how to reach out to those investors: how do you get Coinbase, how do you get Stellar, on your cap table? I would say this bootcamp is one of those ways you can start getting some of those names on board. Yes, so following from those words: build your business on Web3 and apply for the next bootcamp. Amazing. All right, well, I've kept you here long enough, longer than you wanted.

[01:02:00] No, but thank you so much to each and every one of you. I am so excited, and honestly I feel very proud of what we've accomplished, and very proud of all of your growth. I take it personally, since I've been part of this process, so it feels like part of your growth is mine too. And going into that: we have another bootcamp coming up. I am super excited, and Joseph, I'm going to share my screen; we actually have some fancy slides. One second for Joseph to present, and then we can wrap up, not the bootcamp, but this discussion of the beautiful growth among these companies. Joseph, take it away. Yeah, so we've

[01:03:00] spent the past hour or so talking about the bootcamp, and we have another one coming up. It's a three-day virtual bootcamp where we work with teams to go from your idea to a prototype in those three days, and the idea is to help you think fast, to think through some
of the problems you face and then arrive at a very simple solution you can immediately start playing around with. You get to work with mentors; whatever idea you have, you'll have a mentor to work with, and you have the exciting, amazing team from SDF that everyone has been talking about to guide you on anything from business cases to technical challenges. And even past the

[01:04:00] bootcamp, you still have all that support; you still have the community. There's going to be a demo day at the end of the bootcamp where you present your prototype, and we're awarding prizes with SDF, the foundation, anywhere from five thousand to twenty thousand dollars, a lot of XLM. And of course, as Funto pointed out, there's the opportunity for follow-up funding from a bunch of people, including the wider Stellar community itself. So applications are open now; you should definitely apply. Just go to the link as seen on this screen. We extended the application date again, just so more people can get the opportunity, so applications close on the 27th. Please do apply. It's challenging in many ways, but it's also

[01:05:00] very rewarding, I can assure you, and as most of the panelists here have said, it's a really great opportunity. Awesome, thank you. Well, thank you so much, everyone, for being here, for sharing your stories and your experience. I really look forward to your growth as companies; please keep me updated, and stay in the Stellar community and with the SCF, because the SCF round is also upcoming and definitely has a lot of opportunities. I'm excited to hear from you, and to all future bootcamp participants watching: applications are open, as Joseph mentioned, so definitely apply. Thank you so much again to everyone, really grateful for your work, and let's stay

[01:06:00] in touch.
diff --git a/meetings/2022-04-28.mdx b/meetings/2022-04-28.mdx new file mode 100644 index 0000000000..a84f677922 --- /dev/null +++ b/meetings/2022-04-28.mdx @@ -0,0 +1,157 @@ +--- +title: "Host Guest Boundary and Data Model" +description: "This session digs into CAP-46’s foundations for Stellar smart contracts, focusing on the host/guest execution boundary, core value/object types, and how a shared data model can enable deterministic execution and future interoperability." +authors: + - david-mazieres + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [legacy, CAP-46-1, SEP-8] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This discussion introduces Project Jump Cannon’s early building blocks by walking through CAP-46 (now CAP-46-1), the proposal that defines the low-level vocabulary for a WASM-based smart contract system on Stellar. Rather than adding user-facing transactions immediately, the focus is on establishing the “nouns” (types, handles, host objects, and XDR structures) that later protocol changes will build on. + +Much of the conversation centers on how smart contracts should execute deterministically and efficiently: contracts run as “guest” code inside a WASM runtime, while Stellar Core acts as the “host” that enforces limits, mediates storage, and provides shared primitives. The group also surfaces open design questions around interoperability with classic Stellar operations, authorization models, and what requirements should drive the evolving architecture. + +### Key Topics + +- Project Jump Cannon direction: why Stellar chose WASM and why CAP-46-1 is a foundational, non-user-facing building block. +- CAP-46-1’s “vocabulary-first” approach: new XDR definitions for values, objects, and handles intended to keep future CAPs smaller. +- Host/guest boundary: guest contracts run in a constrained, deterministic environment while the host mediates access to resources and data. +- Value vs object split: passing simple tagged values across the boundary while storing richer structures as host-managed objects. +- Shared data model benefits: lower contract code size, reusable serialization, better contract-to-contract interop, and more inspectable on-ledger data. +- Open questions on determinism and safety (e.g., whether to support floating point and how to avoid non-deterministic behavior). +- Interoperability debate: how much “classic” Stellar functionality (payments, trustlines, authorization, signers) smart contracts should be able to invoke. +- Authorization and privilege models: whether signing-weight-style permissions are sufficient, and how to avoid confused-deputy/ambient-authority pitfalls. +- Forward-looking performance considerations: discussion touches on parallelism concepts, while noting CAP-46 itself is intentionally limited to core types. + +### Resources + +- [Project Jump Cannon: Choosing WASM (blog post)](https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm) +- [CAP-0046-01: WebAssembly Smart Contract Runtime Environment](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) +- [CAP-0046-01 discussion thread](https://groups.google.com/g/stellar-dev/c/X0oRzJoIr10) ([alternate](https://groups.google.com/g/stellar-dev/c/vkzMeM_t7e8)) +- [SEP-0008: Regulated Assets](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0008.md) + +
Video Transcript

[00:00] Hey everyone, welcome to the Open Protocol Meeting. Sorry for the slightly late start, just a quick technical glitch, as per usual. So, the Stellar protocol meeting: in these meetings we discuss and plan for upcoming changes to the Stellar protocol, and today we're going to talk about something very exciting: Project Jump Cannon, which is going to bring smart contracts to Stellar. Just a quick intro: as most of you know, Stellar was launched in 2014. At the time there was a very deliberate design decision to keep things simple, to only support a fixed repertoire of transactions. But at this point, after a ton of feedback from the ecosystem, it's clear there's a need for more flexibility. Developers

[01:00] are interested in building applications that rely on submitting custom Turing-complete contract code to run in the transaction execution phase of the network; essentially, people want smart contracts. So in March we officially kicked off Project Jump Cannon (there's a link to the announcement in the meeting description), and after a very thorough examination of the existing smart contract landscape, we made the decision to build it on a WebAssembly runtime, WASM for short. If you want to know more about the selection process, there's also a link to a blog post called "Project Jump Cannon: Choosing WASM" that walks through that decision and the evaluation that led us to where we are today. But today we're actually going to start to get into the protocol changes necessary to accommodate that decision. We're going to start by talking about the Core Advancement Proposal, [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md): WebAssembly Smart Contract Runtime Environment, which, as you can probably guess from the title, specifies the lowest-level code execution and data

[02:00] model components of a WASM-based smart contract system for the Stellar network. Once we talk about [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md), we'll also start to ask some general questions about how smart contracts should interoperate with the network. This is, as per usual, a technical discussion, so if you want to keep up, I suggest taking a look at the CAP as well as the discussion thread about it; both are linked in the meeting description. And if you're interested, you can also join the conversation about smart contracts by participating in that thread or by joining the Stellar developer Discord, which is where we're having all the Jump Cannon discussions so that the work is open and participatory; you can really follow along on the Stellar developer Discord. Okay, [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md). CAP-46 came out on Friday, and there's been a lot of back and forth on the mailing list over the past few days, a lot of questions asked. But I'm going to pass it to Graydon: to start off, is there anything you want to tell us

[03:00] about [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md), or any issues that you have that you'd like to discuss today? Thanks. Yeah, there's lots to discuss here.
I want to give a little bit of background, because this is, in some ways, an unusual CAP. Often when we write CAPs, they're directly changing the protocol, or proposing to directly change the protocol, that people are using, and [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) is a little bit weird in that it contains protocol contents (there are new XDR definitions, like there would be in most CAPs), but none of them can actually be used yet. This is a building block for other CAPs, so if this CAP were accepted as-is tomorrow, there would be no new messages you could send on the network yet. It's just a building-block CAP: a piece of vocabulary whose potential uses we want to talk through for the future, without actually committing to a specific transaction

[04:00] format, specific ledger entries, anything like that. It's a vocabulary, in particular, for interoperation between two environments. The way to think about this CAP is that it's describing two different places inside of Stellar Core where code can run: the guest and the host. That division between guest and host is a key concept being introduced here, and most of the CAP is a discussion of how the guest and host relate to each other: their calling relationship, what values can be passed back and forth between them, and the types of data that will be stored. In this particular proposal, the way we're discussing data storage actually involves a split, and it's a split brought about by idiosyncrasies of the WASM runtime interface: you can't really pass complicated data types back and forth. There are a few other reasons for it too;

[05:00] there's a large rationale section of the CAP that tries to lay out our thinking as we explored different options here. There's an efficiency argument, code size, compatibility, the ability for third parties to browse and interoperate, for contracts to interoperate with one another. There's a whole host of reasons for splitting data the way we are. But really, the way to think about this CAP (and I've had this conversation with a few people so far) is to picture in your mind a four-quadrant diagram: upper left, upper right, lower left, and lower right. In the upper half you've got the host and guest environments on the left and right. Those exist at runtime, while code is executing: two environments communicating with each other and

[06:00] sharing access to a data model, where much of the data is stored on the host side but the guest has these little handles that reference into it. Then, in the lower two quadrants, corresponding to each of those types of data (what we call the values, which can be handles, and the objects), there's an XDR layer. So in the upper two quadrants of this diagram you've got the host and guest at runtime, where values are in memory and the contract is executing; in the lower two quadrants you've got the XDR representation, which is data at rest. In fact, quadrants aren't quite the right description, because the entire lower layer of this diagram is essentially all XDR. But there is a correspondence between the values that are accessible on the guest side, a Val, and the SCVal XDR type that's presented in the CAP.
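To make that value/object correspondence concrete, here is a minimal sketch in Rust of what a tagged value crossing the host/guest boundary could look like. The names, variants, and bit layout below are hypothetical illustrations, not the CAP's actual encoding; the authoritative definitions are the Val and SCVal types in [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md).

```rust
// Hypothetical sketch of a tagged 64-bit value: small payloads travel
// across the host/guest boundary by value, while larger data stays on
// the host side as an object referenced by an integer handle.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Val {
    U32(u32),          // small integer carried directly in the value
    Bool(bool),        // boolean carried directly in the value
    ObjectHandle(u32), // index into the host's object table, not a pointer
}

impl Val {
    /// Pack into a single u64 for crossing the WASM boundary:
    /// the low 3 bits are a tag, the upper bits are the payload.
    pub fn to_payload(self) -> u64 {
        match self {
            Val::U32(n) => (n as u64) << 3,
            Val::Bool(b) => ((b as u64) << 3) | 1,
            Val::ObjectHandle(h) => ((h as u64) << 3) | 2,
        }
    }

    /// Unpack a u64 received from the other side of the boundary.
    pub fn from_payload(p: u64) -> Option<Val> {
        match p & 0b111 {
            0 => Some(Val::U32((p >> 3) as u32)),
            1 => Some(Val::Bool((p >> 3) != 0)),
            2 => Some(Val::ObjectHandle((p >> 3) as u32)),
            _ => None,
        }
    }
}
```

The packed form matters because WASM's import/export interface can only pass simple numeric types, so anything richer than a small scalar has to be reduced to either an immediate payload or a handle.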
So the CAP introduces these value and object types, and it introduces the split between the host and the guest

[07:00] environment, and that vocabulary is the background of what we're trying to get laid out here. Now, we have a prototype implementation of this; actually, at this point we have two prototype implementations, because we've done a version of this in C++ and a version of it in Rust, and we're fairly confident this is a structure that works well, in that it allows very small and highly interoperable guest code to run in a WASM runtime that is connected to a host that has these host objects. Most of the discussion around this CAP so far has actually been about which host objects to start with, because there's a potential repertoire of many types of host objects, and a little bit around the value representation and bit-packing and things like that. But there's not a lot of,

[08:00] you know, in many CAPs there are semantic rules to talk through, in terms of a sequence of events that occurs in response to a transaction, or an algorithm that's implied, or something like that. There's not a lot of that here. This is really more of a "nouns" CAP: it's laying out a vocabulary of things that will exist in memory, on stable storage, in XDR, or being sent over the wire. You can imagine all sorts of operations happening, but all the actual operations that will occur on this data model are essentially left to later CAPs. They're things that still have to be worked out; we're still experimenting with that. Because smart contracts are such a big project, with so many different parts, we wanted to split out meaningfully digestible pieces of the problem, review them, talk through them, get community input on them,

[09:00] and ultimately, as software engineers, implement and merge them piece by piece. And this is the first piece. So that gives a little bit of background on it. There was one other thing I wanted to mention. Actually, Graydon, I have one high-level question for you first. I know you go into this in detail in the CAP, but can you give a brief overview of the specifics of the value/object split? Because that's something that is very unique in the world of blockchains; I think Elrond is the only other blockchain we saw that does something similar. Yeah, for sure. So, what is the alternative, and why is this better? Yeah, totally. There's an analogy a lot of people have been using while we've been talking about this (it was actually the thing I wanted to mention, so I'm glad you brought it up), which

[10:00] is that if you've used a web browser, you can think about this in terms that will be very familiar. In a web browser, you program your web pages in JavaScript, but the web browser itself is a very large piece of code that is not written in JavaScript. Some of the front end is written in JavaScript, but much of the browser is written in C++ or Rust or C or some other low-level systems language; it's compiled ahead of time, and it's essentially a platform for the applications, in the end the JavaScript applications, to run on.
If you've written JavaScript, you'll know that you often have object references that aren't references to JavaScript objects: you can have a reference to a DOM node, or a window, or a media container, or an XMLHttpRequest, or something like that. And these objects are not like the other objects; they're not like the JavaScript code that you have. You can't inspect them, you can't find their source code; they're just sort of out

[11:00] there somewhere. Where they are is the host environment. They're supplied by the C++ platform you're running on. They're much more efficient; they can do things the JavaScript code isn't capable of doing; they're often much larger pieces of code, and if you had to ship them over and over again with every application, your application would be gigantic. By being part of the platform, part of the environment your JavaScript runs in, they're almost like a library you can call of very fast, very reliable, very common features, but you operate them at some distance. If a function call goes into them, you don't get to break in your JavaScript debugger in the middle of that function call; it comes back to you, and you get control back in your guest code when it returns. So we're approaching this

[12:00] model (and as you said, I think the Elrond model works in a similar fashion) where we're coming up with a set of objects that the platform will provide, implemented in C++ or Rust and baked into the platform. They're things that all, or quite a lot of, smart contracts are likely to want, and so, ahead of time, the Stellar Core developers, ourselves and anyone else who wants to contribute, will have created this repertoire of host objects. Those objects are, again, fast: they're not written in WASM and they're not running on the virtual machine; they're written in native code. They're typically much larger, with a large code footprint supporting them, and that code will not be included in your smart contract. You don't have to include that code; your smart

[13:00] contract just calls them, and so your contract code is much smaller. But you operate them at a distance: you hold a reference to a host object when you're working with it, rather than having it in the memory of your smart contract. Your smart contract has its own little linear memory; the virtual machine has its own memory. So you can write code in whatever language you're writing (and I think most of our smart contract source languages are going to be, well, we're actually looking mostly at Rust there as well, interestingly enough), and you could write a data structure in Rust and have it running in the WASM virtual machine, just as you'd have a JavaScript array running inside a JavaScript program. But we're also going to try to provide almost every object you want as a host object, so that you don't actually have to compile anything into your contract except for a handle. And this handle is essentially a quasi-pointer:

[14:00] it's not actually a memory address, it's just an integer number that starts from zero.
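Here's a small runnable sketch of that allocation scheme (host-side object table, integer handles counting up from zero), again with made-up names and types rather than anything from the CAP or the actual prototypes:

```rust
// Sketch: the host owns the real objects; the guest only ever sees an
// integer handle, which is just an index into this table.
#[allow(dead_code)]
enum HostObject {
    Vec(Vec<u64>),
    Bytes(Vec<u8>),
}

#[derive(Default)]
struct Host {
    objects: Vec<HostObject>, // handle N is simply objects[N]
}

impl Host {
    /// Allocate a new host vector and hand its handle back to the guest.
    fn new_vec(&mut self) -> u32 {
        self.objects.push(HostObject::Vec(Vec::new()));
        (self.objects.len() - 1) as u32
    }

    /// A "host function" the guest can call with a handle plus plain values.
    fn vec_push(&mut self, handle: u32, value: u64) {
        match self.objects.get_mut(handle as usize) {
            Some(HostObject::Vec(v)) => v.push(value),
            _ => panic!("guest passed a missing or wrongly-typed handle"),
        }
    }
}

fn main() {
    let mut host = Host::default();
    let h = host.new_vec(); // the guest receives only the number 0
    host.vec_push(h, 42);   // and operates on the object "at a distance"
}
```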
The handle counts up as we allocate host objects, and these typed handles that you'll be operating on, and passing to host functions, refer to host objects, but they don't actually hold the host object in the guest's memory. So, again, there's a variety of benefits. One benefit I haven't mentioned yet so much (it's in the rationale section, and we've talked about it a bunch while working on this) is the corresponding XDR form. When we've looked at a lot of smart contracts, most smart contracts spend most of their time, from what we can tell, if they're not doing cryptography (obviously there's the special case of cryptographic operations, which we'll also get to), serializing and deserializing their data

[15:00] store. The smart contract is invoked and given a byte buffer, and then it spends all this time in its virtual machine, running virtual machine instructions, just inspecting a byte buffer and copying values out of it into a data structure in memory, then operating on the data structure in memory, then re-serializing it back to a byte buffer. And that's a little bit weird, for a few reasons. It's not an efficient use of computational resources, because that's compute-expensive work and you're doing it on the virtual machine instead of on the host, where it would run in native code. It's also very likely to be shared, or shareable: if you're using a data structure similar to the data structures other people are using, the idea that you would need custom serialization and deserialization, as opposed to reusing a library that does it, is a little funny. It's making every smart contract ship

[16:00] redundant, duplicate contract code that does the same thing someone else could do. And on that note, with respect to commonality: the more the data types are common between smart contracts, the more the smart contracts can interoperate. If smart contract A wants to call smart contract B, it's going to want to pass arguments (accounts and amounts, maybe vectors containing things, maybe large numbers or cryptographic data types or whatever), and in order to pass data from one contract to another; I mean, a contract is just a program, and for two programs to interoperate they have to have a common data language. So if you assume you're going to have this interoperability requirement between contracts, where they share some kind of data vocabulary, then necessarily the serialization and deserialization code has to be shared. And factoring that serialization/deserialization code out, having it be part of the platform, and having

[17:00] the XDR pulled out of a ledger entry and turned into host objects, and then just handing handles to those host objects to a contract, saves the contract from shipping any of that serialization code. It also saves the contract from having to deal with compatibility issues, because everyone's using the same data model. It just seems like a win all around. Every time we've looked at this problem from another angle, we've found another way in which it seems advantageous. Third-party browsers, for example, would be able to browse the stable data format. In many smart contract systems, every data value that a smart contract saves to the blockchain is just an opaque byte blob that no one can inspect.
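To illustrate the shape of the guest side under this design, here's a hedged sketch of a contract entry point that never touches a byte buffer. The import names, signatures, and storage-key conventions are all invented for the example, not taken from the CAP:

```rust
// Hypothetical guest-side contract for a WASM target: only u64 payloads
// (tagged values or handles) cross the boundary, so the contract compiles
// to a tiny module with no serialization code in it at all.
#[link(wasm_import_module = "host")]
extern "C" {
    // The host parses the ledger entry's XDR and returns a handle to a map.
    fn get_contract_data(key: u64) -> u64;
    // Host-side map operations, working on handles "at a distance".
    fn map_get(map: u64, key: u64) -> u64;
    fn map_put(map: u64, key: u64, val: u64) -> u64; // returns updated handle
    // The host re-serializes the object back to XDR when the entry is stored.
    fn put_contract_data(key: u64, obj: u64);
}

/// Exported entry point: arguments and results are plain u64 payloads,
/// never byte buffers the contract has to decode itself.
#[no_mangle]
pub extern "C" fn increment_balance(account: u64, amount: u64) -> u64 {
    unsafe {
        let balances = get_contract_data(0); // handle to a host-managed map
        let old = map_get(balances, account);
        let new_bal = old.wrapping_add(amount);
        let updated = map_put(balances, account, new_bal);
        put_contract_data(0, updated);
        new_bal
    }
}
```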
If we have a common data type such as the one that's presented here, all of the ledger entries will be at least structured, so you could browse + +[18:00] them. Now, you might not be able to make total sense of the contents, but you'd at least have some structure that you'd be able to browse, which would make diagnostics easier. It would make writing third-party adapters for consuming events coming out of the system easier. It would make testing easier, creating mock data or attaching fuzzers to it. There are all sorts of advantages to using a common data format, so I tried to lay some of them out in the rationale section here, but it really does seem like this is a huge key to a better design for smart contracts than what we've seen in a lot of other platforms. And so, again, that's sort of why this made up the first building-block CAP: because if we do adopt this approach, we will be deeply structured by this decision. And obviously, if we don't adopt it, if there's really strong community feedback that this is a terrible idea and nobody likes it, or if the technical review says this is impossible and we won't be able to make it work, or + +[19:00] there's no way you can ever come up with common data types that everyone would find agreeable, that's really important information to know, because it does seem very appealing. But if it's not going to work, that will also change our strategy very dramatically. So I've talked a lot right now. I don't know if you have any other questions or other things that you wanted to discuss about it. There's some detail in the CAP. Most of the conversation we've been having so far is just around missing detail in the CAP, or debating the set of objects. That's one of the big questions: which objects do you include in your sort of MVP first pass? What host objects do you include as your baseline? So I actually have one follow-up question before we go deep into the details, which is: how do we encourage people to use these managed objects rather than to do something completely custom on the source language side, just because they can? Yeah, so this does not, of course, this does not preclude shipping + +[20:00] your own encoder and decoder. There's a data type in this CAP called a binary, which is just a raw byte array, and so if you want to store raw bytes and ship the serialization and deserialization code yourself, we're not gonna be able to stop you. I mean, it's a Turing-complete environment, right? So that's like trying to prevent someone from computing a particular function; it's against the whole point of the platform to try to prevent that. But I think you'll have a fairly strong disincentive just from the fee model. If there's any kind of pricing of resources here, which there has to be in order to arbitrate access to CPU and disk resources on the validators, it will be a lot more expensive to do it that way. My hope, anyway, and obviously this won't work very well if the fees turn out the other way, but my hope and my assumption is that you'll be saving a ton of whatever gas or metering fees the system is charging users by using host objects, because they're so + +[21:00] much faster and more efficient. I mean, is it time to start digging into some of these details, or does anyone have other questions?
I guess my first question is: in general, do people feel pretty good about this model? I mean, I have kind of a meta comment, which is that, having been involved in a few standardization efforts, it seems a little weird to be contemplating a change of this scope without a requirements document to reference first. So I have a bunch of nits, but to some extent it's gonna be, who knows? Because we don't know what the requirements are and aren't, right? So, for example, I have huge concerns about the use of floating point values. I can think of, like, a million really + +[22:00] catastrophic bugs that could happen, right? Certainly in my lifetime, literally, hardware has implemented IEEE floating point incorrectly. On top of that, from one release to the next of the compiler runtime there might be small changes, like they might use a different NaN representation, because there are tons of ways of representing not-a-number, things like that. You know, positive and negative zero. So maybe we need it, maybe we don't, but without a requirements document to go back to, how do we know whether some of these things are good ideas or not, or whether they're gonna serve our purposes? So, David, the one thing that we have going for us right now is that there's a lot of prior art in the world of smart contracts, and we have made a list of sample smart contracts that we + +[23:00] are interested in seeing. That should be a SEP or something; there should be a document this references, saying: these are the requirements for smart contracts on Stellar. Right, okay, that makes sense, I can work on that. Maybe in the meantime, if you have specific concerns, like the floating point thing sounds like something we might want to discuss, I can speak to that a little bit. I mean, just very briefly: WASM as a spec does include instructions for floating point. This CAP doesn't specifically preclude them. We've talked so far somewhat seriously about the possibility of actually filtering them out, of saying that any floating point instruction equals an invalid contract, + +[24:00] just, again, to allay fears, because you're not the first person to express fear about floating point. Now, personally, myself, I'm a little bit more of a floating point believer, so I'm not as concerned about that. There are two sources of what we might consider non-determinism or wiggle room in floating point behavior that are left open in the WASM spec. In all other respects, WASM just delegates directly to IEEE 754, and if you do not implement 754 correctly, yeah, you're in error, that's that. And 754 has not actually changed in 40 years, so I think that's actually a fairly stable thing to point to. Yeah, but it's not the standard so much as implementations of it which have been buggy. There have been, like, two CPUs shipped in my lifetime that had bugs, and they were very high priority fixes. So, + +[25:00] saying, okay, but these are exactly the CPUs that would be running Stellar Core, right? So this would be catastrophic for Stellar Core.
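(The instruction-filtering idea mentioned a moment ago could look roughly like the sketch below, written against the `wasmparser` crate. Treat the exact API details as assumptions, and note a real validator would enumerate every f32/f64 opcode rather than this abridged list.)

```rust
// A sketch of the "reject floating point" idea: scan a module's code section
// and declare the contract invalid if any float operator appears.

use wasmparser::{Operator, Parser, Payload, Result};

fn module_uses_floats(wasm: &[u8]) -> Result<bool> {
    for payload in Parser::new(0).parse_all(wasm) {
        if let Payload::CodeSectionEntry(body) = payload? {
            let mut ops = body.get_operators_reader()?;
            while !ops.eof() {
                match ops.read()? {
                    // Abridged: a real filter matches every f32/f64 opcode.
                    Operator::F32Add
                    | Operator::F32Mul
                    | Operator::F32Div
                    | Operator::F64Add
                    | Operator::F64Mul
                    | Operator::F64Div
                    | Operator::F32Const { .. }
                    | Operator::F64Const { .. } => {
                        return Ok(true); // contract would be declared invalid
                    }
                    _ => {}
                }
            }
        }
    }
    Ok(false)
}
```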
So again, if we have a requirements document that says here are 10 contracts we want, and six of them need floating point, then we need floating point. If zero of them need floating point, then it seems crazy. I agree that we can decide whether or not to have floating point. I think referring to the FDIV bug as a reason that floating point is not a reliable piece of software is hyperbolic; I don't think that's a fair objection. Floating point is very well defined at this point. What about compilers that, you know, use 80-bit when it's in a register and 64-bit when it's been spilled to memory, right? Things like that? So, 80-bit floating point is not an accessible format in WASM. You only have access to the 32- and 64-bit formats for all the operations. But what if there's a bug in the runtime where it would accidentally use 80-bit? But you're saying that it's not available. It's literally not available + +[26:00] on the target; the WASM target does not have extended floating point. The idea, at the same time, of figuring out whether there's a requirement for floating point before getting too deep into what might go wrong, I think, is the right one. Okay, so Tomer, you're gonna make that happen? Yep, okay. Are there other... So floating point is something we're putting a bracket on; it may come up, and it may not, and we can have that discussion. Are there other concerns like that we should get into now? Well, I think the big one is concurrency, right? Like, if the requirements include performance, then we need to be able to run smart contracts on multiple cores in parallel, which entails one set of requirements, like making sure that we don't run conflicting smart contracts in parallel. + +[27:00] If we don't need performance, then we don't need that. But it seems like that, again, is something that we need to decide pretty early, and it needs to be driven by requirements. Yeah, I can speak to that a little bit. That is definitely something that we have been extensively considering and have, in fact, a prototype planned for. As with everything above the single-contract, single-VM interface, it's not specified in this CAP. So this CAP doesn't, for example, talk about ledger entries at all, and so any concurrency issues relate to access to the ledger. There are essentially no concurrent host objects in this model, and I'm happy to spell out that fact: these host objects are isolated to a single VM, single-threaded, + +[28:00] or sorry, to a single host context, which may have multiple VMs, but they are executed on a single thread, in serial. So there's no concurrent access to host objects whatsoever in this CAP. Right, so this seems potentially not workable, depending on our requirements. Let me finish. Concurrent access to ledger entries is planned, so the ledger would be the shared concurrent data structure, the thing that it's meaningful to talk about concurrency with respect to, and that's something that we will be discussing in a later CAP, because the transaction format for invoking transactions that we have planned includes, I guess, a trick or a standard pattern from the deterministic database literature, where transactions carry static read/write sets, sort of stapled to the transaction.
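(A sketch of that pattern, with hypothetical names: every transaction statically declares the ledger keys it will touch, and a scheduler greedily groups transactions whose footprints are pairwise disjoint, so each batch can run in parallel with no concurrency control at all.)

```rust
// Transactions carry static read/write sets; only non-conflicting
// transactions are scheduled into the same concurrent batch.

use std::collections::HashSet;

type LedgerKey = String; // stand-in for a real ledger key type

struct Tx {
    reads: HashSet<LedgerKey>,
    writes: HashSet<LedgerKey>,
}

impl Tx {
    /// Two transactions conflict if either writes something the other touches.
    fn conflicts_with(&self, other: &Tx) -> bool {
        self.writes
            .iter()
            .any(|k| other.reads.contains(k) || other.writes.contains(k))
            || other.writes.iter().any(|k| self.reads.contains(k))
    }
}

/// Greedily partition a transaction set into batches of pairwise-disjoint
/// transactions; each batch can then run concurrently while preserving
/// strict serializability, because concurrent executions touch disjoint data.
fn partition(txs: Vec<Tx>) -> Vec<Vec<Tx>> {
    let mut batches: Vec<Vec<Tx>> = Vec::new();
    'next_tx: for tx in txs {
        for batch in batches.iter_mut() {
            if batch.iter().all(|t| !t.conflicts_with(&tx)) {
                batch.push(tx);
                continue 'next_tx;
            }
        }
        batches.push(vec![tx]);
    }
    batches
}
```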
So, you know, + +[29:00] the semantics are strict serializability, with the caveat that it's implemented by deterministically partitioned read/write sets. So, essentially, there's no actual contention; there's no requirement for concurrency control, because the concurrent executions are all on disjoint data. I think so. An account ID, for example: it's a type that doesn't exactly correspond to an actual account, it's just the public key or whatever. And then the contract literally can't see any data that it hasn't pre-declared as part of its read/write sets. So there's essentially a static data structure and a static schedule for each transaction set that partitions it into non-overlapping partitions that then run concurrently. So I agree it's a concern. It's something that we have been thinking quite a lot about. I don't think most of its contents show up in this CAP; + +[30:00] I think they show up in the CAP that talks about the transaction life cycle and access to the ledger, which will be coming later. But I can reiterate in this CAP that we're not going to be supporting any of the WASM multi-threading proposals that are floating around for running inside one of these VMs, and that the multiple VMs that exist inside of a host context as described in this CAP are intended to have strictly serial semantics, and that all of the objects described in this CAP would be private to that single thread. So far, okay, cool. And so everything else other than floating point is completely well defined in the WASM spec? Like, if you shift a 32-bit number by more than 32 bits, what does that do, for example? Yeah, that is well defined in the WASM spec. There are, as I said, two floating point idiosyncrasies, and they're actually not the ones you're talking about. There's some + +[31:00] NaN normalization, which all the interpreters we've been looking at have some option for, normalizing NaNs after every operation, which the host is just expected to do; and the state of the floating point environment flags. You know, there's a hardware environment, the floating point environment, which controls the rounding mode and the propagation of exceptions, whether the exceptions are eager or quiet, and it's something you have to set: as a host environment you have to set it to a particular state before you invoke your WASM code, but there's no interface for changing it from inside WASM code. So as long as we specify what the settings of the floating point environment are, it would be deterministic with respect to that. It's just that WASM as a spec doesn't define what you set the floating point environment's settings to. So what happens if you right-shift a 32-bit integer by + +[32:00] 32? I don't know off the top of my head; you'd have to look at the spec. I don't have the WASM spec memorized, but it's very well defined. It's one of the most well-defined VMs out there, I think, probably, that's ever been done. There was something else you were asking about, though. Oh yeah, everything else in WASM is very well defined.
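(A minimal sketch of the NaN normalization just described, assuming nothing about any particular interpreter's API: collapse every NaN bit pattern to one canonical value after each operation, so results are bit-for-bit deterministic across interpreters and CPUs.)

```rust
// After every floating point operation, map all NaNs to one canonical NaN.

const CANONICAL_NAN: u64 = 0x7ff8_0000_0000_0000;

fn canonicalize(x: f64) -> f64 {
    if x.is_nan() {
        f64::from_bits(CANONICAL_NAN)
    } else {
        x
    }
}

fn f64_div(a: f64, b: f64) -> f64 {
    // e.g. 0.0 / 0.0 yields *some* NaN; hardware may differ on which bits.
    canonicalize(a / b)
}

fn main() {
    let q = f64_div(0.0, 0.0);
    assert_eq!(q.to_bits(), CANONICAL_NAN); // deterministic bit pattern
}
```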
The big caveat in my mind is that there are no host functions specified here, but there are a bunch of host functions implied by the data model, right? Like, there's a map and a vector and a bignum, and you only have those data values if you expect to have functions that operate on them. And a completely reasonable objection to that caveat that one might have to this CAP is: I don't think it's possible to specify functions on this data type unambiguously, so we should leave it out. And particularly when we're talking about cost models, that's something I'm somewhat concerned about: making sure + +[33:00] that we're only including data types about which we can plausibly believe that we would be able to construct correct, or at least worst-case, cost models for any of the reasonable operations that would be implied by the presence of that data type. So, you know, big numbers imply there's going to be big number multiplication. Can we figure out a reasonable worst-case cost model and make sure that's correct? I think we probably can. That's why I included big numbers; I think big numbers are reasonably well studied. But if that strikes at the heart of fear of non-determinism, or fear of the impossibility of bounding cost models, I think that would be a very reasonable point of debate on this CAP: like, please leave this out because it's too scary. Is it worth actually doing an exercise to create and enumerate those in the CAP itself, or is that something that's just a bunch of unnecessary work?
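(To illustrate the worst-case cost model point for big numbers, here is a hedged sketch; all names and constants are invented. Schoolbook multiplication of an m-limb by an n-limb bignum does on the order of m times n limb operations, so a host can charge a conservative fee up front, from operand sizes alone, before doing any work.)

```rust
// Charge a conservative worst-case cost for a bignum multiply before
// executing it; trap if the budget can't cover it.

struct Budget {
    remaining: u64,
}

impl Budget {
    /// Hypothetical pricing: a flat base cost plus a per-limb-pair cost.
    fn charge_bignum_mul(&mut self, limbs_a: u64, limbs_b: u64) -> Result<(), &'static str> {
        const BASE: u64 = 50;
        const PER_LIMB_PAIR: u64 = 3;
        let cost = BASE + PER_LIMB_PAIR.saturating_mul(limbs_a.saturating_mul(limbs_b));
        if cost > self.remaining {
            return Err("budget exceeded"); // trap before doing the work
        }
        self.remaining -= cost;
        Ok(())
    }
}

fn main() {
    let mut budget = Budget { remaining: 10_000 };
    // A 32-limb x 32-limb multiply: 50 + 3 * 1024 = 3122 units.
    assert!(budget.charge_bignum_mul(32, 32).is_ok());
    assert_eq!(budget.remaining, 10_000 - 3122);
}
```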
+ +[34:00] I don't... I mean, the thing is, I'm not really an expert in CAP writing, but it seems to me that we don't have a section in CAPs that's sort of miscellaneous discussion or speculation, and that's really what we'd be doing there. We would be speculating forwards to future CAPs. And that's what's unusual about this CAP: it is really incomplete. It's not a whole feature in and of itself. I say in the preamble to it that there's actually nothing new that a user can use here, which is a little bit weird for a CAP. And so it might make sense to include in that preamble, or in the context or justification section or something like that, a little bit of forward speculation: we assume that the following things are going to come, they'll probably look a little bit like this, but we're not specifying them here, just for context. If you like, I'm happy to add that. No, it's okay. I think we are going to have those other CAPs; the host function CAP, for example, is one that we need to + +[35:00] kind of lay out as well, right? And we'll have a different CAP, I think, around the cost model and things like that, limits and cost models. So it's fine; it's one that's being covered in separate CAPs right now. Yeah, we started to do that with SPEEDEX, actually, the whole thing of splitting up an actual protocol into many pieces, so that we have parts that we can think about and discuss, and there will be some of that here as well, of course. I was going to say real quick: I think we want to leave some time to also talk about the other topic for today. So yeah, David, you have another question? Oh yeah. + +[36:00] So I guess I'm also trying to understand: so you can have a 32-bit number, either as a value or as an object? There's no object type for a 32-bit number. There's only a version embedded in value, because it always fits: a value is the 64-bit type, with the tag union on it, so there's no reason to ever box it, essentially. Sorry, so you have this part where you say the only data values that are shared are 32- and 64-bit numbers, floating point and integer. And then you have these tag types that include a 63-bit positive number and, like, a 32-bit unsigned integer and a 32-bit signed integer. So what's the relationship between those and the fundamental values, then? I see, okay, yeah, sure, happy to talk about that. So the part you're reading that says there are exactly four types of data + +[37:00] values shared between guest and host: I'm maybe splitting hairs in terminology here a little bit. These are essentially the data values mandated by the WASM interface. So these are the only possible data values; there's no way for us to talk about a data value that is not one of those four. When we're calling a function, for example, and passing arguments, the only argument types are those four. I see what you're saying: we don't have, for example, a 64-bit integer... like, even though we can pass a 64-bit integer, we always interpret it as one of these tagged things. Yeah, so what comes later on is, and I should actually clarify the layering here, is that we're picking just the 64-bit type. We're actually only using the i64, and we're saying every value we pass back and forth is going to be an i64, and we're going to use it as a tag union inside of itself to carry a bunch of different stuff, some of which can be an object + +[38:00] reference, some of which can be a small symbol; there's a bunch of little values in there.
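(A sketch of the tagged 64-bit value being described. The bit layout below, low bits as tag and the rest as payload, is made up for illustration and is not the CAP's actual encoding.)

```rust
// One i64/u64 crosses the guest/host boundary; a tag in the low bits says
// whether the payload is a small embedded integer or a host object handle.

#[derive(Clone, Copy, PartialEq, Debug)]
enum Val {
    U32(u32),       // small integer, embedded directly in the value
    ObjectRef(u32), // handle to a host object (vec, map, bignum, ...)
}

const TAG_BITS: u64 = 3;
const TAG_U32: u64 = 0;
const TAG_OBJECT: u64 = 1;

fn encode(v: Val) -> u64 {
    match v {
        Val::U32(x) => ((x as u64) << TAG_BITS) | TAG_U32,
        Val::ObjectRef(h) => ((h as u64) << TAG_BITS) | TAG_OBJECT,
    }
}

fn decode(bits: u64) -> Val {
    let payload = (bits >> TAG_BITS) as u32;
    match bits & ((1 << TAG_BITS) - 1) {
        TAG_U32 => Val::U32(payload),
        TAG_OBJECT => Val::ObjectRef(payload),
        _ => unreachable!("tag unused in this sketch"),
    }
}

fn main() {
    // Everything crossing the boundary is a single 64-bit integer...
    let wire: u64 = encode(Val::ObjectRef(7));
    // ...and the receiver re-interprets it through the tag.
    assert_eq!(decode(wire), Val::ObjectRef(7));
}
```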
Well, should we move on and talk about the next agenda item, which is interoperability? There are a couple of questions here that I think... yeah, I think we can ask more questions on the dev meeting list, on the runtime. All right, cool, let's switch topics then. So, excuse me, on the topic of interoperability, my questions kind of come back to the same kind of thing that David opened up with, which was: what are the requirements? I've been thinking about this for the last 10 days or whatever at this point, but it's really vague what we actually want. And one of the big questions that we keep coming back to, that keeps coming up in conversations I'm involved in, is: what should a smart contract on Stellar be able to do? And one of the examples that keeps coming up is, one of the things that is very centralized on Stellar today is managing the authorization for an asset. Like, what if you want to have a daily transfer limit or something like that? These are things that are really annoying to do, because it all has to be done through a central server, etc. But a smart contract could fulfill this role in theory. Do we want to make it easy for smart contracts to do this stuff, where basically they become like a gatekeeper to classic? Or do we want to not make that easy and basically say, hey, if you want to build these kinds of things, just build them entirely in smart and get the features there? These are the kinds of questions I'm interested in at the high level, and there's more to how this is structured. But first let's just open this up: do we think those kinds of things should be possible? Or should we just say, hey, just do that in smart? + +[40:00] And to help me understand the actual question on this, like the low-level, super-granular access control example: the question there is, should people be able to do a normal, classic-looking payment and it still ends up being gated by some smart contract? Is that what the question is? I can supply an example, but I mean, I understand the use case; it's more, what is the question on the how? Is that really related to classic? I guess the way that I'm imagining any of this stuff working is that anything a smart contract is involved in originates on the smart contract side. So you'd imagine basically saying, okay, I'd like to make this payment, but then the smart contract does everything, all the operations and stuff. It's basically just a gatekeeper. It's like, okay, you can do this. Then it goes and issues the allow trust, + +[41:00] and then it issues the payment, and then it issues the revoke trust, etc. And it does all of that on the native, on the classic side, basically controlled by smart. So you don't submit a classic payment anymore, but everything is kind of happening classically, including all this allow trust stuff, which adds a lot of complexity, as we know. So I think the payment alone is kind of an essential building block. You can't have smart and classic not even be connected by payments; that wouldn't make any sense. But the other stuff, managing trust lines, managing signers, managing authorization, all this other stuff: do we want to allow that, or do we want to just say, hey, you can't do that from smart? I mean, I think that, yeah, this is exactly getting to the core of why I want the requirements. Because, definitely, managing trust lines and allow trust is a big pain. Something you might want to do is write a little smart contract that says, if so-and-so has authorized you + +[42:00] to hold their asset, then you can automatically hold my asset, right? So, is that the kind of thing we want to support? And if so, that requires a fairly wide interface between smart and classic. Right, you know, my bias a priori would be to say that we actually want that, because at the end of the day we want transactions to be as cheap as possible, because that's always been kind of the Stellar way. And so the more we can leverage stuff that's implemented in C++, and that's potentially optimizable through maybe even future things like SPEEDEX, the better. It also, of course, makes it easier to debug. It makes interoperability better, because everyone's speaking the same language, using the same objects. But again, it boils down to requirements. So while we don't have this document of requirements, I'm happy + +[43:00] to throw some out there. One of the things that are very important for our ecosystem is that existing services and existing stakeholders don't become deprecated. We're talking mostly about exchanges, about issuers like Circle.
We want to make sure that their asset is still viable and fully functional with the new smart world. Now, to what extent is it just, a user moves their value into the smart world and then it's a whole different set of primitives? I don't know; that's kind of what we need to figure out. But we do want people to be able to issue smart assets as well. A lot of issuers are using things like SEP-8 today to do things like various velocity limits, or minimum balance + +[44:00] limits on their asset, and today that requires a lot of finagling. It would be great for people to be able to issue them similar to how ERC-20 tokens are issued. So, but we could keep SEP-8? So, in other words, there would be a new signer type, which would be like the smart contract endorses it, and then you could just kind of do it through the smart contract, but we would keep the same framework, or not, right? Yeah, I think you're getting into implementation here. I am getting into the how, because this actually affects [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md), like whether certain things are a good idea or not, or what base objects we might want to have supported. And so, for example, one thing that is not in here, that I can tell, is any kind of capability. So if a smart contract wanted to invoke some privilege or sign some other + +[45:00] transaction, there'd be no way to do that. Maybe privileges, or whatever you want to call them, need to be a kind of first-class thing. By privileges here, do you mean a contract has the privilege to do a certain action on a certain account that is not the contract's account, or it has a certain signing weight or something? Yeah, exactly. Yeah, this gets into the second part of my question in the document, which is the relationship between contracts, signers, and accounts. And Leigh has proposed... is Leigh here? I don't know. Yes. So Leigh's kind of proposed this model where maybe a contract should be a signer in some sense: if a contract can generally control an account's trust lines, data entries, whatever, the account + +[46:00] itself, then it should be a signer on that account, and for a variety of reasons maybe it should be the only signer with any weight on that account, so it has exclusive control. But in this world, you could still imagine the kind of case where you want a one-off, right? Like, I want to allow this contract to withdraw funds from my account right now, a certain amount that I believe is going to be withdrawn, like 100 or something. And this gets a little weirder. It's like, well, what do you say? Okay, the transaction that runs the smart contract invocation: the account signs that transaction, and that's what gives it the authorization. But how can you be certain that the smart contract invocation, at the time you actually run it, is going to do the thing that you thought it would do? It's a little different from a classic Stellar transaction, where you're like: this is a payment for fifty dollars, signed. And that's where I think these kinds of privileges come in. Either you give + +[47:00] the contract one-off control of your entire account at that signing weight, to do anything; you know, you say medium.
Well, it can do anything at medium, whatever it wants. Or do you need some better control? I don't know. What do you think about this, David? I mean, yeah, I hate to sound like a broken record, but there are many possible answers here. A priori, without having the requirements for context, my kind of inclination would be... you know, the thing people tend to run into problems with is ambient authority, right? Where it's sort of, any time you do something, if there's any possible way you're allowed to do it, you can do it, right? Think of all these time-of-check-to-time-of-use bugs and stuff that happen in Unix. And so, to have some notion that a smart contract has privileges, and then it needs to explicitly invoke particular privileges: like, maybe there's a smart contract that + +[48:00] we wanted to have power over two accounts, and so it has two different privileges, and when it's doing something we want to literally specify which of those two privileges it wants to use, to avoid a kind of confused deputy problem. So, without knowing the requirements, I would say it seems to me like maybe a good idea to have privileges be a really explicit thing. But again, it's a little abstract right now. Yeah, definitely. If I can just chime in on one thing with respect to vocabularies and [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md): this is just a minimal prefix, right? This is the set of host objects we can think of right now, and you get to add new ones in the future, so we don't have to have a complete list for this. That's all I'm saying. Sure, but, you know, getting the sort of + +[49:00] privilege model right from the start is probably a good idea. I mean, there's no concept of a privileged operation here, because there are no operations. Yeah, all I'm saying is we don't have to worry about whether privileges are properly modeled in this CAP. We can say that is a thing we have to decide, but it doesn't necessarily belong in this CAP. One thing on the topic of privileges that I think is relevant here is: the bigger we make the interop surface area, the more complicated the privilege system has to be. If the privilege system basically amounts to, you can only make payments, then it's like, okay, how much payment can I make, from what account, to what account, for what asset? That's the whole space. But unless that's right, it gets a little more complicated than that. Like, do we say that you can just do payments, or do we say that you can do payments at a frequency? As you add the number of cases, we're + +[50:00] almost saying that permissions are almost a contract that you can implement. So, we've talked about privileges and permissions before, but I don't think we've really talked about how we make it so that people can innovate on them. Because we could define some basic privileges, but then people will be stuck, sort of like what they have today with classic Stellar: there'll be limitations that they're trying to work around, or they end up just going and building their own ERC-20-like contract just to completely replace the entire system, because the permissions don't give them access to what they want to do.
I mean, I guess one of the things I'm saying is that Stellar actually does already have a notion of permissions or privilege, which is called signing weight, and we haven't really formalized it or maybe expressed it this way. But, you know, without knowing... you know, as a first + +[51:00] draft, we probably want to use that exact same structure and not introduce something new. Where things tend to go wrong is if there are two different privilege systems that are kind of operating on the same objects. Then you get weird loopholes, where there's a gap between the two that can maybe be exploited. But signing weight is a really weak privilege system. That's what I was talking about at the beginning of this, where it's like: I'm authorizing a contract to do stuff to my account at medium signing weight. What am I authorizing it to do, though? Unless I've audited the code, the contract might do anything. It's way more complicated than looking at a set of operations. A contract is code, so it can only do what the code can do. Right, sure, but what it might do could be complicated. It might actually even be very difficult for me to predict at the time that I + +[52:00] signed the transaction. Like, imagine a contract that has two functions. One function is, hey, set a number, and the other function is, hey, whatever number was set most recently, extract that amount from my account and send it to the contract. There's no way I can know what the contract is going to do when I sign for it. But at the time you add that contract, at the time you give that contract signing weight on your account, there's presumably some reason for that, right? I mean, you decided you wanted this contract to have access to your account. Sure, yeah, but I don't necessarily want to give it unfettered control over my account, right, or even very strong control. The contract is code, right? So the contract does what the code does, and you decide whether or not you want that to have access to your account. Right, if the only thing a contract does is manipulate the + +[53:00] authorized flag on trust lines, then you look at the code and you say, great, I don't have to worry about this draining all my lumens by signing for huge fees. What if the contract is mutable? Well, okay, so that's something that needs to be driven by the requirements document. Do we need mutable contracts, right? General consensus in this room is yes. That question, there's general consensus on it in this room, and the answer is yes. Okay, well, I need to be convinced. I'm not saying that you can't, but show me the requirements that tell us that what we want cannot be achieved through multiple releases of a contract. It's just like having eval in an actual contract, right. But if you have multiple releases and a point of indirection, which is the pattern people have when they only have immutable contracts, if they have a point of indirection, like a proxy or something, and then releases, you still have the possibility, in fact the requirement, of people delegating authority to + +[54:00] the indirection point with permission, and saying, I give permission to this indirection point, whatever it happens to be pointing to right now, to do something. And so you're right back with mutable contracts.
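(A purely illustrative sketch of the indirection argument David makes above: authorize a re-pointable proxy, and you have, behaviorally, a mutable contract, even though each individual release is immutable. All names are invented.)

```rust
// Users grant authority to the proxy's id once; the admin can re-point the
// proxy at new releases, so what that authorization actually does can
// change under the users at any time.

type ContractId = u32;

struct Proxy {
    admin: &'static str,
    target: ContractId, // the immutable release currently pointed to
}

impl Proxy {
    /// Authorized callers invoke through the proxy, whatever it points to.
    fn invoke(&self, call: &str) -> String {
        format!("forwarding `{}` to contract {}", call, self.target)
    }

    /// The admin releases new code and re-points the proxy at will.
    fn upgrade(&mut self, caller: &str, new_target: ContractId) {
        if caller == self.admin {
            self.target = new_target;
        }
    }
}

fn main() {
    let mut p = Proxy { admin: "issuer", target: 1 };
    println!("{}", p.invoke("transfer 100"));
    p.upgrade("issuer", 2); // same authorization, different behavior
    println!("{}", p.invoke("transfer 100"));
}
```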
People will work their way around to something that is behaviorally equivalent to a mutable contract with whatever permission you've given it. They'll get there either on top of immutable contracts or via whatever indirection system you build. But I just want to say we are pretty much out of time. So if this feels like a great last question, I'd say ask it; if not, maybe we hold off. Well, I was just saying you can have your contract not have direct access to the account, but just have access to another contract that only does certain things, not the things you don't want done, but anyway. So it seems like a requirement... we just keep coming back to this: the requirements doc that you're requesting would be the thing that would help guide us and figure out the answers to these questions. Okay, cool. I'd also like to propose perhaps moving this meeting to the dev Discord and having it weekly instead of every + +[55:00] other week, because there are going to be a lot of questions that we need to answer, and we want to do that quickly with Jump Cannon. Cool. I mean, I think we should ask that offline and just verify that works for everybody and look at calendars, and I can do that after this meeting. Awesome, thanks, everybody. See you next time. +
diff --git a/meetings/2022-05-05.mdx b/meetings/2022-05-05.mdx new file mode 100644 index 0000000000..23363916ce --- /dev/null +++ b/meetings/2022-05-05.mdx @@ -0,0 +1,176 @@ +--- +title: "Classic Asset Wrapper Versus Adapter Models" +description: "This discussion continues the Jump Cannon design work by comparing two competing approaches for smart contract asset interoperability on Stellar, weighing ease of use, security, issuer controls, and ERC-20 compatibility." +authors: + - david-mazieres + - jonathan-jove + - justin-rice + - leigh-mcculloch + - siddharth-suresh + - tomer-weller +tags: + - legacy + - CAP-46-1 + - CAP-46-2 + - CAP-48 + - CAP-49 + - SEP-8 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session dives deeper into Project Jump Cannon by focusing on how existing “classic” Stellar assets should interoperate with smart contracts. The core of the conversation contrasts two proposed models—CAP-48 and CAP-49—and evaluates how each balances backward compatibility, developer ergonomics, security guarantees, and long-term ecosystem evolution. + +Participants explore the trade-offs between frictionless integration of existing assets versus a cleaner separation that more closely mirrors ERC-20 semantics. The discussion also broadens into related topics such as issuer compliance controls, wallet and Horizon implications, and how asset design choices may shape adoption, scalability, and user experience across both classic Stellar and Soroban. + +### Key Topics + +- Comparison of asset interoperability approaches: + - CAP-48’s adapter-style model that directly operates on classic trustlines. + - CAP-49’s wrap/unwrap model that escrows classic balances and mints ERC-20-like tokens for smart contract use. +- Design goals for interoperability: immediate usability of existing assets, minimal issuer effort, performance, and safety. +- ERC-20 compatibility concerns, including `balanceOf`, `totalSupply`, allowances, and predictable transfer semantics. +- How wrap/unwrap boundaries isolate classic mechanics while enabling cleaner smart-contract abstractions. +- Preservation of issuer controls such as authorization, revocation, and clawback when assets move into smart contracts. +- Security considerations around signing authority and preventing smart contracts from over-spending user balances. +- UX and infrastructure implications for wallets, Horizon, and future data access layers when assets exist in both classic and smart contexts. +- Issuer decision-making trade-offs between classic assets (scale, cost, exchange compatibility) and smart-native assets (flexibility, programmability). +- Related CAP discussions: + - CAP-47 (now CAP-46-2) on smart contract lifecycle and mutability. + - Questions around contract versioning, auditability, and how downstream systems track contract code over time. 
+ +### Resources + +- [Project Jump Cannon: Choosing WASM](https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm) +- [CAP-0046-01: WebAssembly Smart Contract Runtime Environment](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) +- [CAP-0046-02: Smart Contract Life Cycle](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md) +- [CAP-0048: Smart Contract Asset Interoperability](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) +- [CAP-0049: Classic Asset Wrapper Model](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) +- [SEP-0008: Regulated Assets](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0008.md) + +
Video Transcript + +[00:00] Hello everyone, and welcome to the Open Protocol Meeting. Obviously it is the first time we're doing it in this Discord channel, but most of the conversation around Stellar development has moved to Discord, and so it makes sense to kind of put it all in one place. Most of the Jump Cannon work, in fact all of it, is really being done in the open here on the Jump Cannon channel and on the Jump Cannon dev channel. So anyone who's interested in seeing us work to bring smart contracts to Stellar can join in that discussion. So today: these protocol meetings are here to discuss and plan for actual changes to the Stellar protocol, and we are focused at this point on Project Jump Cannon, which will bring smart contracts to Stellar, and we're talking here about the protocol changes that are necessary in order to accommodate it. To tackle Jump Cannon we actually decided to break up the suggested protocol changes into a suite of Core Advancement Proposals, and at this point + +[01:00] there are actually four different CAPs, three of which are linked to in the meeting invite, and one that just came in under the wire: [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md). So those CAPs are [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md), the WebAssembly smart contract runtime environment, which is what we discussed last meeting, and it's basically a building block that establishes a vocabulary that we can use to talk about WASM-based smart contract environments; CAP-47, the smart contract life cycle, which defines the structure of smart contracts on Stellar and specifies how users can create, update and remove them; and then [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), which I think are two approaches to deal with smart contract asset interoperability. And so this meeting is open; anyone can sort of watch it. Now, I believe David has an SDF role. But we are also archiving this and posting it on YouTube, so that people who can't be in the Discord channel who want to watch the protocol meeting can watch + +[02:00] it later. So with that said, we're talking about smart contracts, and I believe that the first set of issues that is probably worth talking about relates to your CAPs, John: [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) deal with interoperability. So I feel like there are some open questions that it's probably useful to talk about. But John, do you want to give an overview of where we are with those two CAPs, and then we can take it from there? Sorry, it's the first time I'm doing a video call on Discord, so I'm still getting a little oriented; let me pull up the relevant documents, give me one moment, and then I will give a little intro. Okay, I'm ready, sorry about that. + +[03:00] So basically these two proposals, they relate to just one very simple question, or simple-sounding question... yes, exactly.
But there are some rough edges on that, which is: we have some classic assets, some of which have substantial value sitting beneath them, and we'd like to be able to use those immediately, as soon as we deploy Jump Cannon smart contracts on the public network. And the kind of framework here is, well, ideally we can use them without anybody doing anything; they just kind of work like magic. So there's a couple of things that go into that. One thing we'd like is for them to be pretty fast. Tokens are kind of the framework on which a blockchain lives, so that has to be really efficient in order to make the whole thing efficient. The second thing is, there's no reason to think that anchors + +[04:00] are going to instantly deploy new software to support interoperability between their existing assets, like classic assets, and smart contracts. And so, ideally, we just kind of do it for them in some sense; everybody just gets it for free, basically. And there's one other requirement, which I'm now not remembering... oh yeah, the third one is a security kind of situation, which is that when I sign for a Stellar account at medium threshold, I have complete control over all of that account's balances, meaning I might be saying that I'd like to make a five dollar payment, but if I have a million dollars in my account and I signed for a smart contract to control my account at medium threshold, it could just make a million dollar payment. Ideally we should make it so that situation is completely impossible; you can't just sign for the account to make payments, basically. And so these two CAPs, 48 and 49, are two different perspectives on this + +[05:00] problem. [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) is kind of like: what if I did the thing that was the most frictionless possible? But if you do that, you inherit a bunch of weird stuff left over from the fact that Stellar assets are not really quite exactly like ERC-20 assets, or whatever other Ethereum standard you'd like. I'm really focused on ERC-20 just because I'm familiar with it and it's classic and old, but there are other standards that could be relevant here. But I'm gonna keep saying ERC-20, and you should just interpret that to be whatever standard you like. [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), on the other hand, is more like: what if we introduce a little bit of friction, but we make it so that these things really look like ERC-20 assets and avoid any of these weird questions, like, oh, who did that? There's no more ambiguity. So that's kind of my pitch for these two proposals. + +[06:00] Ultimately, is it about choosing one path or the other? Is this a choice between 48 and 49 that we need to contemplate and make, or do they work together? And, John, given that [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) was just dumped on the repo this morning, maybe you can give us a high-level overview: what are the differences between [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) and [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md)? Yes, and to answer Justin's question: it is an either-or, but not an either-or where no other options exist. These are two good options, and there could be other good options I haven't thought of.
But I have thought of a lot of options, and these two definitely stand out as probably the best given our requirements. So it's either 48 or 49, or something that none of us have thought of yet. To go over what Tomer was talking about in terms of 49, because I did kind of + +[07:00] just drop it on you guys at, like, 11 o'clock my time this morning: basically, I went over the issues in [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md), which are listed in the design rationale, there are four-slash-five of them depending on how you count, and I went through and kind of designed a system that solves them. So, what is the functionality, before I go over how the issues work out differently? In [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md), basically the machinery is that the asset adapter or contract that everybody gets for free actually goes around and controls the Stellar trust line. So when you use the ERC-20 function transfer, it actually goes and uses the payment operation to send a payment from this trust line to that trust line, or from this account to that account, if it's needed. Whereas in [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), that never happens: + +[08:00] the only time the contract will ever use your trust line is when you call these two functions, wrap and unwrap. Or, if you prefer, we could call them deposit and withdraw, but this was confusing to Nico, so I'm sticking with wrap and unwrap; we can choose whatever names we like later, but I think nobody will be confused by the terms wrap and unwrap. And basically, what the wrap function does is it effectively takes some stuff from your trust line, puts it in some new type of ledger entry for the smart contract, which I'm going to call a wrappable balance (the name stinks, whatever, we can change it), and then it basically mints the corresponding ERC-20. And then all the other ERC-20 transactions operate on this ERC-20-type balance, which is just a new thing completely. And the opposite function, unwrap, just does all of that in reverse. And so the ERC-20 stuff is completely working on new ledger entries; it doesn't + +[09:00] use any of the old machinery at all. Only the wrap and unwrap functions actually interop with the old machinery. And then, to make all this stuff work, you now have to add a few compliance functions: you have to add auth and clawback to the ERC-20 side of it. But that's super simple machinery; it's super easy to build. I looked at how people have implemented it; I was actually looking at the USDC contracts to see how they implemented the auth, and it's exactly as simple as I was thinking we would do, so that was kind of a perfect fit in that sense, in terms of how this solves the problems.
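(A rough sketch of the CAP-49 shape John describes. The names wrap, unwrap, and transfer come from the discussion; the types and bookkeeping are invented for illustration, and the classic-side escrow calls are elided.)

```rust
use std::collections::HashMap;

type AccountId = String;

struct WrappedAsset {
    /// New ledger-entry-like balances that the ERC-20-style functions use.
    wrapped_balances: HashMap<AccountId, i64>,
}

impl WrappedAsset {
    /// Take `amount` out of the caller's classic trust line (elided) and
    /// mint the corresponding wrapped balance. Wrap/unwrap are the only
    /// places the contract ever touches the trust line.
    fn wrap(&mut self, who: &AccountId, amount: i64) {
        // classic_trustline_debit(who, amount); // classic-side escrow, elided
        *self.wrapped_balances.entry(who.clone()).or_insert(0) += amount;
    }

    /// The exact reverse: burn wrapped balance, release the trust line funds.
    fn unwrap(&mut self, who: &AccountId, amount: i64) {
        *self.wrapped_balances.get_mut(who).expect("no balance") -= amount;
        // classic_trustline_credit(who, amount);
    }

    /// ERC-20-style transfer: operates purely on the new wrapped balances,
    /// never on classic machinery, so the issuer/infinite-balance quirks
    /// discussed below never arise.
    fn transfer(&mut self, from: &AccountId, to: &AccountId, amount: i64) {
        *self.wrapped_balances.get_mut(from).expect("no balance") -= amount;
        *self.wrapped_balances.entry(to.clone()).or_insert(0) += amount;
    }
}
```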
But actually, before I continue, anybody want to stop me and ask some questions? Otherwise I'll just go over how it solves the issues that I see. Just for me to understand: there's still this canonical one-to-one mapping between an asset and a wrapped asset? Yeah, okay, it's like a native implementation. You do the same kind of thing, where you + +[10:00] say, hey, give me the contract ID for this asset, and this contract ID is some kind of native thing; there's no WASM behind it, it just runs. It has some interface that we prescribe, and there's exactly one of them for every asset that you could ever make, that's it. And they can store data, and I'm not really talking about how the data is stored in the CAP, because we don't know what the fee model is yet. So there will be some fee model; I don't know who's paying for it, I don't particularly care at this time, and whatever we decide later should just retrofit onto that. That's that, basically. Any other questions before I go on? Okay. So why do I like the 49 version better than the 48 version? I'm just going to go over the issues I noticed. So the first thing is, it's not + +[11:00] labeled as an issue in [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md), but it is one, which is: ideally you should be able to use a Stellar ERC-20, like an ERC-20 wrapping a Stellar classic asset, however that interface works, exactly the same way you would use any ERC-20. That's not even possible in [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md), even ignoring some of the other weird quirks for which we'd have to agree on some design that makes sense. Because just when you initialize a contract, let's say you have a liquidity pool contract, and you have a factory for it that takes two assets, which are going to be the two assets that you hold reserves for: well, you'd have to actually check, hey, is one of these assets a Stellar classic asset, even if it has an ERC-20 interface? Because you would still need to create the trust lines for it. That's a bummer, but at least it only happens one time, in initialization. That doesn't happen in [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md): you don't + +[12:00] need to create the trust lines, the contract does all the magic, and we don't need to be backwards compatible, because it's all new machinery. However the fee model works, I don't know. Then the next thing is the issue of balance. If I take the [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) version and I ask the contract, hey, balance of the asset issuer, what should I return? Should I return 0, should I return it in int64 format, should I return uint256 max? I mean, I have no idea what number to return; there is no sensible answer. The balance of the issuer is infinite and also zero. Sorry, I think zero is a good answer for that question. But you can make a payment from the issuer account, so surely the balance isn't zero. Sure, it can mint, right? But it's the balance of the currently minted, you know, the current circulating supply. + +[13:00] I don't know, man, it's not compelling to me at all. Because in this context, the function transfer,
when called on the issuer, actually has to mint, which is not how an ERC-20 normally works. And since, if I can call transfer, I should have enough balance to make that transfer, you see: if the function transfer(100) would succeed, then I must have a balance of greater than 100. I mean, should we be rethinking how trust lines work in the legacy model? I think it might have been a design mistake that payment is how you create assets. Obviously a lot of people expect that now, but maybe that's something we should reconsider, that actually an issuer can have a balance. I totally agree that I would love to + +[14:00] change that, but it would be a lot of work that I don't want to do in this context, time-wise, when we could just fix the interface on the ERC-20 side of it so that this doesn't happen anymore, and that's that. If you want the same behavior, use smart contracts; that's what [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) proposes. This problem doesn't exist in CAP-49, and the reason it doesn't exist in [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) is because the issuer now can't hold a real balance, and transfer doesn't mint anymore, because it's all operating on these wrapped-up tokens. Tomer's making a lot of faces, so I feel like he's doubtful about my argument here. No, I'm not doubtful about your argument, I understand. I think there's an elegance to [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), which is better separation between classic and smart, but we are putting a lot of burden on downstream systems and the user to actually wrap and unwrap tokens and + +[15:00] present some sort of unified view of the universe across these boundaries. I agree about the unified view of the universe; I don't really agree about the burden of wrapping and unwrapping. There's a section about that, but obviously I dropped this on you guys like two hours ago, so I don't assume you've all read it. But basically my argument is: well, hey, you have to go and work through this whole allowances thing anyway, and if you have to go and do that, you can wrap up your assets when you're doing that, at the same time. There's nothing stopping you from doing them at the exact same time. So since your wallet is gonna guide you through the allowances anyway, it might as well guide you through the wrapping. That's kind of my argument. That might not apply in the context of EIP-2612, is that the one where you have the permit function? I'd have to go and look at how that would work, but I assume we can do a similar thing for the wrapping anyway, so probably it would still be fine. + +[16:00] Okay, any questions before I do another one? Am I actually being helpful here, should I stop? You're being very helpful, John. I'd love to hear Nico's take; I know he's been having some technical challenges. Okay, John, can you talk a bit about the various authorization models, auth required, auth revocable, clawback enabled, and how they would relate to both [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md)? Yeah, definitely.
So in [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md), everything kind of just works the way it used to: + +[17:00] there's no magic to it at all. The only catch is that you can't do the auth stuff from smart. We could make it so you could, but there's a wide variety of reasons why you should not be able to, and that might be a problem. In [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), the auth stuff works a little differently, but it works differently in a favorably good way: everything is simpler, and we can actually fix some things that were broken, or not broken perhaps, but less than ideal, in classic. So the first thing that's better is: in classic there are three authorization states. You can be unauthorized, authorized to maintain liabilities, or authorized. In [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), all of that still exists in classic, but in smart there's only one check: you're either authorized or you're not. Because there are no liabilities, authorized to maintain liabilities is the same as unauthorized, so we don't even need to represent it at all. Glorious, + +[18:00] glorious. The next thing is, we had to do this clawback... sorry, the trust line clawback enabled flag, something like that, I can't remember the name of it anymore, to preserve backwards compatibility with trust lines that you didn't believe would ever be clawbackable. But since clawback already exists before smart, anything that anybody ever builds on smart should be aware of the fact that clawback is possible, so we don't need that flag either. Sweet, so we've solved two problems already. And so now, basically, the way auth works is it's just a single bit, not on your account, but stored in the contract data, I guess, stored in this wrappable balance entry type of thing, and all it does is say whether you're allowed to use the contract or not. And I'm representing it more in the sense of completely not allowed to use the contract, + +[19:00] rather than just what Stellar classic would allow. For example, if your account is not authorized on the contract, then you can't even use transfer_from to transfer between two other people; you can never be the sender, basically, on this account. I didn't think of that originally, but that is what is implemented in that USDC contract I was looking at, and that approach makes a lot of sense to me. If I'm not authorized to use it for my own balances, I probably shouldn't be authorized to use it for anybody else's balances either; sounds dangerous. So that's the only real difference. But basically, auth revocable, clawback, all of that stuff that can happen on the contract side is just determined by the issuer flags: if the issuer flags are currently revocable, you're revocable; if it's currently clawbackable, you're clawbackable. That's it, no other magic to it. So it's a much simpler implementation than exists on classic, basically. + +[20:00] Does that kind of answer your question?
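(A sketch of the authorization simplification just described, with illustrative names: classic's three trust line states collapse to a single bit on the smart side, because there are no liabilities there.)

```rust
enum ClassicAuth {
    Unauthorized,
    AuthorizedToMaintainLiabilities,
    Authorized,
}

/// On the smart side a single bool suffices; an unauthorized account can't
/// use the contract at all, not even as the sender in transfer_from.
fn smart_authorized(classic: ClassicAuth) -> bool {
    match classic {
        ClassicAuth::Authorized => true,
        // No order book or liabilities in smart, so this is just "no".
        ClassicAuth::Unauthorized | ClassicAuth::AuthorizedToMaintainLiabilities => false,
    }
}
```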
So we're essentially saying that there will be a new universe of, for example, payments only for smart contract assets that live completely on Jump Cannon — you won't be able to interoperate from the classic protocol. I want to make sure I understand exactly what you're asking me. + +[21:00] Are you saying that basically, when somebody wants to use a classic asset, it ends up being only usable on smart until they send it back? Or are you saying this is a model where people will be using ERC-20-type assets on smart? I'm saying: let's say I'm issuing a new asset on the Jump Cannon side that doesn't relate at all to these wrapped assets or to any adapter whatsoever — it's just my own, you know, Tomer coin. Tomercoin will not be accessible from the classic protocol in any way; I wouldn't be able to use a classic payment operation on this coin. I mean, obviously we can't prevent that, but we should discourage it, right? It would be a bummer if, you know, at some point we want to have really cheap payments and trades, + +[22:00] and so, to the greatest extent possible, we want to encourage people to use assets that can participate in things like SPEEDEX. I suppose, in this context, if you wanted something that could be converted to a classic asset, what you would probably do is actually make it a classic asset and use the wrapper. Right — that's what I'm saying: of course it's true that people can implement their own assets, but to the greatest extent possible we hope that classic assets will actually serve the needs of ninety percent of the kinds of assets people + +[23:00] want to issue. One would hope, right — otherwise... Yes, no, but if I want my coin to have features that are missing from the current Stellar protocol, then I have no other choice, right? So I think, regardless of what we do, we need to acknowledge that we're basically going into a world in which there are two classes of assets: assets that are interoperable between these two runtimes, and assets that are only on Jump Cannon and do not interoperate. And it sounds like we're also saying that classic assets that already exist wouldn't be able to add features like the ones tomorrow you want to add to tomercoin. If USDC wanted to add those same features, they wouldn't be able to do that, because it's a classic asset — is that right? You might migrate back into the native protocol... + +[24:00] I wouldn't actually rely on us building any new features on the old protocol, I think. Yeah — John is smiling, that's all I needed. If that's the case, then by doing this we're not really planning a way to bring new features to existing classic assets. So is there going to be this slow, over-time movement of assets that just stop using classic, and then we lose the ability to have assets in SPEEDEX and things like that? To be honest, I think tomercoin is a bit of an edge case. If we look at it, it's not like assets on Stellar are actually missing real features — it looks like we do have good coverage of asset features.
So I'm not offended by the idea of issuing them still on the classic side and wrapping them. + +[25:00] I'd like to find a way, from the user-experience side, to make the wrapping part less visible, and not have multiple balances for the same asset. But we can figure that out later, because the user experience — you know, when you're crypto-native it makes sense; when you're just a rando trying to make their first steps in DeFi, it's not the best experience. Yeah, I totally acknowledge that. One thing that is kind of missing from the wrapped contract proposal that I wrote — which I did think about this morning — was whether there should be mint and burn operations on the smart side. If there were mint and burn operations on the smart side, it would be possible to effectively issue a classic asset + +[26:00] and then always work on the smart side. So your users could unwrap them and then use them on classic, and re-wrap them, if we wanted to preserve that interoperability. But in practice everything would happen on smart, and people would never really be doing the wrapping and unwrapping unless they want to go back to classic. In that world, you avoid the wrap step: imagine if, instead of an anchor flow that issues you the classic asset, which you then have to wrap up and send to smart, they just ask you — hey, do you want this to be smart or classic? And if you want it to be smart, they just issue it on the smart side, and then you don't have to do the wrapping. We could even make that the default at some point. That might make the UX better for some cases — which maybe would be a lot of cases, actually, I don't know. What do you think about that, Tomer slash Leigh? Yeah — it complicates things for the issuer: now they need to decide which side it belongs on. Ideally there would be one, + +[27:00] especially with these Stellar assets that we expect to be the majority; we want to have a very straightforward best practice, and I think we might be just confusing the issue by adding that functionality on the smart contract. I also think we're asking the issuer to make a decision between scale and flexibility. Like David said, if an issuer wants an asset to be used in things like SPEEDEX or in cheap payment operations, they're going to be tempted to go the scale route — but that might conflict with flexibility. Like, if they want some sort of SEP-8-like functionality on chain that, you know, says "you can use my asset if you have already been authorized to use some other asset," as a sort of KYC interface or something like that, + +[28:00] and they want to add something like that later on — they have to decide at the point of issuing: am I going to be a smart asset, or an asset that can scale? Well, I'd have to think about whether there's a workaround for that. There might be one; I'm not really sure. I'd have to think about that more to answer what I think.
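The mint-and-burn idea floated above can be made concrete with a toy model. The structure and semantics below are guesses for illustration only — the proposal text doesn't specify this interface — but they show the shape of "the asset stays classic, yet the issuer can put units directly onto the smart side":

```rust
// Toy model of issuer-gated mint/burn on the smart side of a wrapped
// classic asset. Hypothetical; not from the wrapped-contract proposal.
struct WrappedAsset {
    issuer: &'static str,
    wrapped_supply: i128, // units currently living on the smart side
}

impl WrappedAsset {
    // Issuer mints straight into the smart side, so users never see an
    // explicit wrap step after issuance.
    fn mint(&mut self, caller: &'static str, amount: i128) -> Result<(), &'static str> {
        if caller != self.issuer {
            return Err("only the issuer can mint");
        }
        self.wrapped_supply += amount;
        Ok(())
    }

    // Burn shrinks the smart-side supply; a holder could then receive the
    // equivalent classic asset through the normal unwrap path instead.
    fn burn(&mut self, amount: i128) -> Result<(), &'static str> {
        if amount > self.wrapped_supply {
            return Err("cannot burn more than the wrapped supply");
        }
        self.wrapped_supply -= amount;
        Ok(())
    }
}

fn main() {
    let mut usd = WrappedAsset { issuer: "ISSUER", wrapped_supply: 0 };
    usd.mint("ISSUER", 1_000).unwrap();
    assert!(usd.mint("MALLORY", 1).is_err());
    usd.burn(250).unwrap();
    assert_eq!(usd.wrapped_supply, 750);
}
```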
Just to clarify — because I was going to ask this question, and I think you just answered it with that, Leigh: I'm an issuer, and there's a choice. I can choose to issue on classic or I can choose to issue on smart. The advantage of issuing on classic is that it + +[29:00] has better scalability? Maybe true. I'm just thinking, if new issuers come to me — what are the other advantages of choosing to issue on classic? Sorry, can you say that one more time, Justin? I couldn't hear the whole thing. Oh — well, basically: when new issuers join the network, why would they choose to issue on classic? Well, presumably the idea of classic is that your asset is very well defined, right? There are just, you know, four bits or whatever that tell everybody exactly what to expect of your asset. The fact that it's such a well-known quantity also allows us to implement things more + +[30:00] scalably, and lets you, you know, guarantee things like auth immutable — guarantee that there won't be weird surprises. I think the big thing is that classic will be cheaper and have more capacity; that's the main reason to do it on classic. But, for example, do we expect exchanges to offer both classic and smart assets, or just classic assets? Like, when you're getting USDC or whatever, will it be a smart USDC or will it just be classic USDC? It'll be classic. Everything that exchanges do — exchanges move slow, they move very slow, and they're very resistant to introducing changes. So if you ever hope to be listed on, you know, Kraken or wherever, + +[31:00] you'll want to make it classic. That's a good point. Yes. Yeah, generally speaking, I think that except for extreme use cases, with smart contracts that have sophisticated, non-trivial logic, we definitely need to push everyone to issue on classic and use whatever interoperability scheme we decide on. Then I think it becomes pretty important to make it as easy as possible to do this. Like those mint and burn operations I was talking about — they sound important now, because if we want people to get a unified smart user experience, but still have an underlying classic asset that you can actually unwrap, that would be the easiest way to get that. So we probably should do that. + +[32:00] Yeah — we're about to make Horizon's life a bit hellish with figuring out these things and how to convey them to the wallets. Wait, but isn't the whole point that, if you issue your asset as a classic asset, then Horizon should just work? That's another... yes. Yeah, but the question is: if a user wants to do something on the smart side — let's say I did something on the smart side, and now part of my USDC balance is on classic and part of it is on Jump Cannon — now Horizon needs to give wallets some unified view. But then why should Horizon even touch the smart side of things? + +[33:00] I agree. George, can you expand on why you agree? I would also love to hear more. It just doesn't seem like Horizon's place to coalesce those separate things. If you're making them separate, then they should stay separate, because we just give an accurate reflection of core's state of the world. And so, if you have them in two places,
then we reflect them in two places, right? Well, a wallet uses Horizon as its, kind of, data access layer into the Stellar universe right now. If a wallet — a classic wallet — well, no, a wallet, not just a classic wallet; I don't have any smart wallets, + +[34:00] but I would imagine Stellar wallets would want to add smart functionality, right? It's not going to be two universes — that's the whole thing. If I have a Stellar wallet and I want to be able to participate in a DeFi application, I need to have access to all that information, and we need to find a way to present that information. You're going to need a level of extensibility that Horizon just... it's just not the Horizon way of doing things. Like, maybe I have some asset and it keeps its own ledger in some weird, you know, compressed state or something, right? So there's actually going to need to be kind of server-side logic for decoding that, and we don't want to be loading that stuff into Horizon — we want Horizon to be stable and + +[35:00] predictable. Let me rephrase — maybe Horizon is a bit of a loaded term — but we need to provide a data access layer to Stellar that allows wallets to reason about what's going on on the dumb side — sorry, the classic side — and to interoperate with these wrap/unwrap functions. So this might not be Horizon; this might be, you know, Horizon NG or whatever. But something needs to provide that functionality, and that something is, you know — we're making its life a bit difficult. Well, I would argue that something is going to need to be much more modular and extensible than Horizon; it's just going to be a totally different architecture. In fact, you might have many instances of this — you would have some Horizon NGs that support some assets but not other + +[36:00] assets, right? It's just going to be a much more heterogeneous ecosystem, assuming that people are taking advantage of the smart contracts. All right, Tomer, can I try to get some clarity on what you're actually saying here? Are you saying that you want this thing — I'm going to use the word Horizon, and we're all just going to pretend we're talking about the same thing even though we're not — you want Horizon, this data access layer, to be able to talk about both the wrapped balances and the unwrapped classic balances, like the trustline balances? Or are you saying that you want it to be more generically useful for exploring the state of the smart contract universe outside of these asset adapter/wrapper things? I think you're saying the first one, but I think David thinks you're saying the second one — and maybe you're not saying either of them. + +[37:00] By the way, can you guys hear me? I don't know how long you were talking, but I wasn't able to hear you for a while, at least. Okay, can you guys hear me now? Yes. Okay, awesome. So I'm talking about the wrapped assets specifically — this kind of dance between wrapped assets and regular assets — and I'm thinking about it top-down, designing from a user perspective, the wallet experience: they have this universal view in which a user doesn't have to think about, you know, which side an asset is on, right?
So either the wallet needs to have these multiple services that it interacts with, or it needs to have this unified service that provides it with a unified view of the world. + +[38:00] I think that, regardless, this is something that needs to happen, and it doesn't necessarily... actually, yeah, I think [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) might be more friendly for downstream. So maybe the problem here is that I didn't read these CAPs, because I didn't hear about them in time. But to the extent that there's a choice between explicitly wrapping and unwrapping versus always having assets be sort of both classic and smart at the same time, I would, you know, strongly favor the unified one — but again, I haven't read the rationales and such. Assuming that we do that, then we're going to have classic assets, which are also accessible in smart contracts, and we're going to have smart-contract-only assets. So, for the classic assets, + +[39:00] Horizon should be sufficient, right, because you can access those through the classic interface. For the new smart assets, every asset is going to need its own Horizon modification, potentially — or a lot of assets are, if they're doing interesting things. And so we're going to need a kind of modularity on the server side, where, like, if I issue a new kind of asset with new properties, I'm going to need to also provide kind of Horizon-side logic for it. And to me it feels like danger — Horizon doesn't feel like it has the kind of modularity and extensibility that we would want for that; we should think about a more plug-in-friendly architecture. There's another angle to this too, and that is that, right now, Horizon mostly just provides access to data, and if we make it such that Horizon has access to contract data, technically + +[40:00] clients could still use that contract data to display things like balances or whatever — they can look things up in the contract data the same way the contract does. There are some downsides to doing that, but that's one way for us to, I guess, kick that can down the road a little bit. There could potentially be services that show some specific balance format for specific types of assets; it doesn't have to be Horizon, though — clients could just use Horizon to get the raw contract data. Actually, I think that's sort of the model of the first demo that Paul had posted into the Jump Cannon channel: he posted a demo where you could use this Horizon-like service to just get the raw contract data, and then the client can interpret it however it wants. + +[41:00] John, is the main difference in terms of the interface between [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) — ignoring the wrapped asset part — just supporting that total supply? Is that the other interface difference?
And then there are a couple of other ambiguity improvements. Like, it's very clear what the answer of balance-of is in all cases — even outside of just the issuer case — in [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md). Is the balance the available balance or the raw balance? This is one of the things I kept coming back to when I was writing [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md), and I don't think I was consistent everywhere. But in [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) there is no notion of the available balance — from the contract's perspective, it's just the balance. So there are some + +[42:00] interface improvements, in that you get the total supply in [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) that you don't get in 48. But the interface is also just more logical in [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md): there's only one right answer, basically — every function just does the thing you expect it to do, because there's no other thing it could possibly do. Do we have to keep to the ERC-20 interface exactly, though? Like, I could see, if we can solve the other problems — if we found a solution to total supply for [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) — we could say, okay, there are actually two balance functions: there's balance-of, and an "available balance of" in the Stellar ecosystem. Is that fair? And we definitely could do that, and then we could basically say: hey, when you implement a token on Stellar, what you should do is implement both of these functions — and probably you want them to be the same function, they just have the same implementation. We definitely could go and do everything in reverse. + +[43:00] So yeah... a little bit... ah, it sucks. Okay, maybe I need to shout or something. It's good enough, okay. I was going to say: the reason we have this on the classic side is because of liabilities, right? And actually, liabilities we could have implemented differently. In smart, the way you would do it is you would actually move the actual liquidity outside of the account, right — so for an offer, basically, you would actually move whatever you want to sell in the offer. That's not the way we did it in classic, whatever, but that's right to me. I don't know if we need to expose those two concepts balance-wise. The available — I mean, the usable — balance is the thing you want to expose; the fact that something is locked in our offers, I + +[44:00] mean, this is not interesting, I think, from a smart contract point of view. It's not usable; it's not available, right? I mean, the main downside of that, though, is that things that should be true cease to be true: you should expect that the total supply is equal to the sum of all the balances, and that just won't be true. This feels like a minor detail to me — the distinction about how we would expose these two values.
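To pin down the "two balance functions" idea and the invariant caveat just raised, here is a sketch. The names are hypothetical — neither CAP specifies this exact interface — and the default implementation reflects the CAP-49 observation that without liabilities the two balances coincide:

```rust
// Hypothetical token interface; not from CAP-48 or CAP-49.
trait StellarToken {
    // Raw balance: what the ledger records for the holder.
    fn balance_of(&self, holder: &str) -> i128;
    // Available balance: raw balance minus amounts locked in offers
    // (liabilities). On the CAP-49 smart side there are no liabilities,
    // so by default the two functions return the same value.
    fn available_balance_of(&self, holder: &str) -> i128 {
        self.balance_of(holder)
    }
    // Exposed by CAP-49; hard to compute under CAP-48, where the supply
    // lives across many classic trustlines.
    fn total_supply(&self) -> i128;
}

// The caveat from the discussion: if a token reports only available
// balances, the familiar invariant `total_supply == sum of balances`
// breaks whenever anything is locked up in offers.
fn supply_invariant_holds(total: i128, balances: &[i128]) -> bool {
    total == balances.iter().sum::<i128>()
}

fn main() {
    // Raw balances: 70 + 30 = 100, matching a total supply of 100.
    assert!(supply_invariant_holds(100, &[70, 30]));
    // Available balances: the first holder has 30 locked in an offer,
    // so 40 + 30 = 70 no longer matches the total supply.
    assert!(!supply_invariant_holds(100, &[40, 30]));
}
```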
We could just choose one, or we could just expose both, and I don't think doing either would break a contract's ability to replicate what's happening on other chains. Is that not true? I don't know if it's true. But if your goal is to make it so + +[45:00] that these tokens interoperate perfectly with what people are accustomed to using — which is my goal here — like, if I could, I would just take the stuff that people have been building on for five years and build exactly that stuff on Stellar, because that's how we're going to get the fastest adoption. Any place where it's not exactly obvious that it works exactly the same — that's friction that's going to slow adoption. And if it doesn't work exactly the same, then it's really going to slow adoption. So the first step is: is it obvious that it works exactly the same? If the answer is no, you probably have a problem. And then, if it doesn't work exactly the same, you have a second problem. So that's how I feel about this: anything we can do to make the answer "this is so obvious that you don't need to think about it, and it does exactly what you're used to" — that should be our goal. I don't know if people disagree with that goal, but I think it's a really good goal. My goal would be to make this so stupidly easy for people to use, + +[46:00] because they're so familiar with it, that they want to build right now, immediately — unless there are things about the way that systems currently work that people hate. There are, and that EIP-2612 thing that I mentioned was something that Tomer pointed out that some people found a big improvement over something people hate about ERC-20. So it can be better. But all of these things work better than classic Stellar assets, I think, at least for the needs of smart contracts. So, I guess we have about 12 minutes left in this meeting. Do we want to keep going? I know there are a + +[47:00] few other questions that came up outside the interoperability questions — do we want to take this time to discuss those, or should we stick with interoperability? There are contract management questions — I know [CAP-47](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md) talks about instance stuff that we want to cover — especially because, for the [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md)/49 discussion, for now, I feel like there's a lot to think about and discuss; it does feel like we got to a natural lull. Do you want to talk about the CAP that you've been working on? Yeah, I can start with [CAP-47](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md). So, all right: CAP-47 just specifies how contract management would work — + +[48:00] actually creating, updating, and removing contracts. So it's relatively simple compared to [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) and 48/49. Leigh had some questions related to this. I think the most interesting one was: we allow mutable contracts, and the CAP doesn't specify any versioning right now. So one of his questions was:
if you allow mutable contracts, how would you identify which contract version was used for a specific invocation? Currently there's nothing in there — you would have to, you know, replay history and go look at which contract entry was used for a specific invocation. So Leigh's question was around, you know, auditing which contract was used. I'm not sure if this is something we should add. Leigh, do you want to talk about this? + +[49:00] Yeah, I mean, you basically set it up. I think there are two components to it: identification, and then referencing. So there should be some way, I think, for someone to speak concretely about a specific version of a contract. There are going to be APIs that expose information like: this is the transaction that executed, this is what it executed on. It makes a lot of sense, I think, for those APIs to be able to point exactly to the exact code — and maybe that's just a hash of the code. If so, that's great; we just need to include that in the CAP. Maybe it doesn't even need to be stored on the ledger — I'm not sure if that's important. It could just be a hash that's not actually stored, and you have to calculate it if you want it. There just needs to be... yeah. And I + +[50:00] guess, in terms of storing it on the ledger, the question I had was: do we need to include that in the transaction results or meta or something like that, so that there is something more concrete? Or do you just have to know "this transaction executed on ledger five," and if you go and look at the state of the ledger at ledger five, you can see that this is the contract that was there? If we have to do that, it just feels a bit ambiguous. Okay. But I think Nico may have mentioned something around, you know, there obviously being a cost to storing 32-byte hashes for everything, and so maybe this is actually not realistic to store everywhere a contract gets used, or something. So, Nico, I don't know if you want to expand on that. + +[51:00] Nico seems to be having on-and-off issues connecting. Yeah, I can hear you right now. Tomer, you have been cutting in and out a bit, though, from time to time. Yeah — is this something that downstream systems can do? You know, when they index changes, they can also index what version of the contract caused those changes. I think so — or at least, maybe George might be able to shed some light on this. I think one thing is that we have had some issues with + +[52:00] backfilling some of this data in the past. One example would be, you know, claimable balances don't track who created them, and it would seem simple for us to say, oh yeah, downstream systems should be able to just figure that out — but it was actually really challenging. I'm actually not sure we ever found a solution for that. So maybe there's going to be an ask to the Horizon team, who are sort of the experts in this field. Yeah, I would say it's pretty hard to say right now, especially for me — I mostly joined as a listener, so I'm not super familiar with how these CAPs are suggested to be implemented right now. But I do know that we have the key-value data access model that Paul is working on, which provides access, I guess, to contracts — but maybe not
so much to how + +[53:00] those contracts have been interacted with. So that's definitely TBD from the Horizon side. I will say that I felt a little bad, in retrospect, about not putting who created a claimable balance on it. Earlier drafts of the proposal had that feature, and we convinced ourselves that it was a waste to do it — which was basically just saying, "yo, Horizon, y'all figure it out," which maybe wasn't super nice in retrospect. But I understand why we did it at the time, and even knowing what I know now, I'm not sure we would have done something differently for that particular case. But there definitely is some potential risk — a caveat for the future. Like, I could imagine people wanting to know exactly what happened, even on chain: hey, this contract had a bug in it — let me write a contract that goes + +[54:00] and identifies all the bugged-out data. I could imagine doing that; maybe you'd do it often, I don't know, I'm not sure. But I don't think we should just assume that the only obvious thing is to push the problem downstream. Yeah, I think that's fair — specifically because, in this case, the identifier, or the reference, or whatever we call it, could be derived from the data itself. I think the claimable-balance creator example is more challenging because the creator wasn't actually derivable from the immediate data of the claimable balance itself, whereas in this case it is: if you have the wasm code, you can hash it, + +[55:00] and if we say the ID is the hash, then it might not be a problem. One thing — slightly different topic, but still related to this code versioning stuff — that I was talking to darth about yesterday afternoon... and he had some reasons that this is maybe not a brilliant idea, and I don't claim it is. But basically, the idea I kind of had, which was weird, was that all contract code is mutable — there are no immutability flags or anything for it — but it's only mutable from within that same smart contract. So basically, if you want to be able to redeploy your smart contract, you need to write a function + +[56:00]
When is the new contract available like the current ledger no it'd probably be the next led you're right yeah it'd be the next ledger it would I guess the operation would basically be like hey at the end of this ledger write the new code kind of like the same thing as like. When we were working on speed x it's like a speed x create offer operation means like hey. When you go into the speed x phase make this offer and try to trade it. So so there's some interesting applications of, that you know you could write a contract, that actually contains multiple versions + +[58:00] of itself and it could change, which version of itself is currently going to be deployed based off some other inputs yeah, that's it's intriguing it's funny you say, that. Because I was thinking about things like, that too I was thinking about deploying a client, that can write itself back to the chain, which I thought was just like intriguingly weird. But there's some examples of like twines, that can write themselves back even under this case of corruption. So like you could imagine a contract, that could have its data corrupted. And then write itself back to the contract anyway, which is pretty neat I have no idea how, that could possibly be useful. But like there probably are useful versions of these kinds of machinery it feels like a good thought to end on since especially since we're out of time. So we are going to be having these meetings weekly I don't know. If we're having well we'll sort of deal with scheduling in the background. But so for anyone who's watching you can find + +[59:00] them in this channel we're also going to be discussing this stuff asynchronously in the jump canon and jump canon dev channels here on Discord thanks everybody I will talk to you all soon you + +
diff --git a/meetings/2022-05-12.mdx b/meetings/2022-05-12.mdx new file mode 100644 index 0000000000..3e8146ca82 --- /dev/null +++ b/meetings/2022-05-12.mdx @@ -0,0 +1,161 @@ +--- +title: "Soroban Asset Compliance and Contract Mutability" +description: "This session continues the Jump Cannon discussion by examining how compliance features of classic Stellar assets—such as authorization, freezing, and clawback—interact with Soroban smart contracts, alongside debates on contract mutability and developer responsibility." +authors: + - david-mazieres + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry +tags: + - legacy + - CAP-40 + - CAP-48 + - CAP-49 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This Open Protocol Discussion stays focused on Project Jump Cannon, turning from foundational runtime questions to practical concerns around asset compliance and smart contract behavior. The core theme is how existing issuer controls—authorization requirements, freezing, and clawback—should (or should not) extend into Soroban without breaking contracts or undermining regulatory expectations. + +Much of the conversation revolves around CAP-48 and CAP-49, debating whether classic assets flowing into contracts can preserve today’s compliance guarantees. Participants also explore the implications of mutable versus immutable contracts, the risks of contracts entering irrecoverable states, and how much responsibility should fall on contract developers versus protocol defaults. + +### Key Topics + +- Compliance challenges for smart contracts holding classic assets with authorization, freeze, or clawback features. +- CAP-48 vs CAP-49 approaches to asset interoperability and where issuer controls should be enforced. +- Risks of contracts acting as “escape hatches” that allow users to bypass KYC or authorization by parking funds in contracts. +- Debate over whether issuers must manually “chase funds” through downstream contracts when enforcing compliance. +- Contract mutability as a safety valve: upgrading or repairing contracts after unexpected asset state changes. +- Concerns about immutable contracts becoming permanently stuck or unfairly drained when issuer actions change balances. +- Comparisons to Ethereum patterns (e.g., blacklistable ERC-20 tokens) and what lessons apply to Stellar. +- Ideas for developer opt-in/opt-out mechanisms when interacting with revocable or clawback-enabled assets. +- Tension between protocol simplicity, developer usability, and preserving issuer expectations from classic Stellar. + +### Resources + +- [Project Jump Cannon: Choosing WASM](https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm) +- [CAP-40: Protocol-Level Asset Clawback](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0040.md) +- [CAP-48: Smart Contract Asset Interoperability](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) +- [CAP-49: Classic Asset Wrapper Model](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) + +
+ Video Transcript + +[00:00] Hey everyone, welcome to the Stellar protocol discussion for May 12, 2022. In these meetings we discuss and plan for upcoming changes to the Stellar protocol — changes that are necessary to evolve the protocol to meet ecosystem needs. Real quick, a word on venue: last week we tried having this discussion in the Stellar developer Discord, and there were some technical issues, so we're back to live streaming this on YouTube. We just don't want to let the technical issues of a platform prevent us from having these discussions, so that we can talk about the substance and keep moving forward. We may experiment with other venues in the future, but we'll make sure to stress-test platforms, + +[01:00] so there are no technical glitches that prevent the conversation from happening. That said, this week we are discussing some CAPs that are related to Project Jump Cannon, which is the announcement we made earlier this year that we are working to bring smart contracts to Stellar; as part of that, there are protocol changes that need to happen. It's been a bit different this time, because we've modularized those changes into a series of Core Advancement Proposals, each of which focuses on a specific aspect or specific type of change that needs to happen to accommodate smart contracts. Now, if you're watching this right now: it's a technical discussion, so I advise looking at the links in the show notes, which are the actual links to the Core Advancement Proposals we're going to discuss and to the discussions on the Stellar dev mailing list, which is where the long-form questions and answers happen. In addition to the Stellar dev mailing list, there's also the Stellar dev Discord, with a jump-cannon and a jump-cannon-dev channel in there, and anyone can join either of those forums to see the discussion unfold or + +[02:00] participate in the discussion. We're doing this in the open so that you can see the work happening, and also so that you can have a voice in the changes as we make them. So, that said, there are a number of CAPs on the table right now that relate to smart contracts and Jump Cannon, and we discussed a few of them last week. I think this week we're going to start by talking about some of the classic/smart interoperability questions that remain, most of which have to do with [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), which are two different flavors of how to approach that. As recently as this morning there was some interesting discussion on the mailing list about that, so I think we start there. And, I don't know — John, do you want to start? Honestly, I'd rather just let Nico start. I mean, Nico posted this huge thing with a bunch of questions, and I responded to it, so it's kind of Nico's turn to talk, in the sense that I'm the most recent speaker on the thread. So maybe we can just kind of go + +[03:00] through and try to figure out — basically just talk through some of these questions, understand what we're actually talking about, and make sure we're on the same page on a variety of different things, and find out if we're not on the same page. That sounds good. Wall of text — yeah, sorry, yeah.
So I guess, like, maybe — I don't know — one of the things we can talk about is actually compliance, because I think that's actually the most important bit that is in there. Okay. I think we should discuss potentially changing — maybe not this time around, but — there's a level of expectation on the compliance front that I think we should maybe adjust, even in classic, so that we can + +[04:00] make things work for Jump Cannon. The type of thing I'm talking about here is that both proposals kind of suffer from the same problem. Today, if you have an asset that you issued on the Stellar network — and basically we only have a couple of smart contracts on Stellar today, right: we have AMMs and the DEX, those are kind of the equivalent — today, if you have money, or any asset, locked into a contract, it's going to be recovered, basically, when your authorization gets revoked. And I think this breaks completely in Jump Cannon — well, if we don't do anything — because the only thing that really gets revoked is + +[05:00] your trust line: you lose access to your wallet. But if you parked your money in some smart contract — that is, you know, the shady guy on the corner — basically, there's no way to enforce anything there. So then the question is: well, what do we do? Because if we don't do anything, in both proposals we're basically saying: well, if you want to get around any kind of restriction, just send your money to this smart contract over there, and then it's like this super-mixer or something, and then you can't reasonably have enforcement. So maybe we can start with that. We could say: well, that's okay, that's the new reality. And then I would argue that we should just stop doing what we are + +[06:00] doing in classic — or at least say that we are going to stop recovering assets from pools or from the DEX — because that's actually a lot of the overhead that we have today in classic. So I'll start with that; I don't know what people think. So I can just start by saying how I responded to the same question from the email thread, which was basically: this does work in [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) — you just might not like the way it works — but all the compliance controls that exist on classic still exist. They just don't extend to all the contracts by default. So you might recall — Nico would definitely recall, because we spoke about it a lot when we were doing the liquidity pools — that we had this discussion about whether we should make people revoke authorization on every single liquidity + +[07:00] pool individually, or do the whole thing globally. Basically, usability for issuers won out over simplicity of protocol design, and we ended up making it so that you revoke and it just gets everything automatically.
But both [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) just revert back to the "hey, you have to go and manually chase everything down" mode, which is the only mode that is sensible in smart contract land. The only possible modes are: you have to go and chase everything down yourself, or you can do nothing. The third mode — it does it all automatically — is not a feasible solution here. But basically, the way that you would deal with this in the CAP — let's talk about the [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) or [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) world; it doesn't really matter for the sake of argument. So let's just say, in [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md), the way you would do this is: hey, suppose that I want to evade the authorization stuff, and so I send my money to some random contract. + +[08:00] Well, you look off-chain and you say: okay, where has this guy been sending his money? Oh, he sent his money to this contract — freeze that address — and the issuer just goes through and kind of freezes everywhere the money went, until they get all their money back. So basically, your actions contaminate all the downstream systems. And that works fine because, at the end of the day, in [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) all the money is stored — when money is held by a contract, it's living in a trust line — and so you can just use the normal authorization techniques on it. Or, in the [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) world, the interoperability layer has authorization, and you can do the exact same thing; you just do it at the contract layer instead of at the trustline layer. So you can do this. It's just kind of annoying, but it's not nothing. + +[09:00] So, well, the thing is that you can actually design better protocols. If you want those smart contracts to affirm compliance, you should design a compliance protocol that actually allows you to do something a little better than that, because this situation you're describing here — I mean, it's like you're shutting down the bank because there's one guy, that you know of, that doesn't behave, and that doesn't sound like... I mean — I know — the smart contract is the bank, right? It's kind of like that: it's an automated thing that holds assets on behalf of a bunch of people. I don't think you can generally do better than that, the reason being: contracts are mutable. So suppose that I have some contract, and I claim to comply with your compliance protocol — you know, it's a contract, it can hold asset A and asset B, and it claims to comply with + +[10:00] both asset A's and asset B's compliance protocols. And then at some point they decide, you know, we don't want to do that, and they just redeploy without the compliance controls — and now that money is unrecoverable. So the only recourse the issuer would have against that would be to go and freeze up the bank anyway. So, like, yes, you can design a protocol that has better properties than that.
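The "manually chase everything down" fallback John describes is, operationally, a graph traversal: the issuer follows the money off-chain and freezes every downstream holder it touched. The sketch below is purely illustrative — the transfer graph, the freeze action, and all names are toys standing in for off-chain tooling, not anything either CAP defines:

```rust
// Sketch of the issuer's chase-and-freeze flow over a transfer graph.
use std::collections::{HashMap, HashSet, VecDeque};

// For each address, the addresses it forwarded the asset to.
type TransferGraph = HashMap<&'static str, Vec<&'static str>>;

fn chase_and_freeze(graph: &TransferGraph, start: &'static str) -> HashSet<&'static str> {
    let mut frozen = HashSet::new();
    let mut queue = VecDeque::from([start]);
    while let Some(addr) = queue.pop_front() {
        if frozen.insert(addr) {
            // "Freeze" this holder (revoke authorization on its trustline,
            // or at the interoperability layer under CAP-49), then follow
            // the money: every downstream recipient is contaminated too.
            for &next in graph.get(addr).into_iter().flatten() {
                queue.push_back(next);
            }
        }
    }
    frozen
}

fn main() {
    let mut g = TransferGraph::new();
    g.insert("BAD_ACTOR", vec!["MIXER_CONTRACT"]);
    g.insert("MIXER_CONTRACT", vec!["POOL_A", "POOL_B"]);
    let frozen = chase_and_freeze(&g, "BAD_ACTOR");
    assert_eq!(frozen.len(), 4); // the bad actor, the mixer, and both pools
}
```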
But the fact is that, in a world where anything is mutable, you can't avoid the need to fall back on that. Of course, as a fallback I see that as reasonable — but if that's the only option... It's not the only option in the world; you can design whatever you want, right? But in the context of classic assets that are authorized — if you are saying that this is the kind of default world we are in... Nico had a bunch of comments like: are we + +[11:00] forcing people to be opted into this new thing that doesn't fit people's expectations? I mean, basically: if we don't opt people in, we'll end up in a world where everything is useless, because people don't opt in. In a world where we can't assume that people will do anything — and that is the world we should assume we live in — basically, all the work we've been doing for the last seven years could be a total waste. You know, all the anchors we've onboarded and such: if we can't get them to take the actions that would be required, it's just like, hey, we just threw it all out. Ideally, everything just works automatically and does the same thing, and if you want to do something better than the same thing, then that's when you have to take action. That's the world I would expect. For example, if you don't want this default behavior, hey, there's an easy solution to that: set your asset to auth required, and then nobody will be able to send money into the smart contract side of it, + +[12:00] and then all you have to do is deploy your own wrapper, and you get whatever functionality you want. But that's the action path. And since most people don't have auth required set, don't have auth immutable set, don't have auth revocable set, don't have clawback enabled — or whatever the hell that one is called — the default behavior is what everybody is inheriting anyway, all the time, every day. I'm talking about, specifically — yeah, for the mutable stuff, I agree. We can basically take the same stance I think we've been taking with claimable balances, for example, which is: you take whatever the current state of the issuer is, and that's kind of what you inherit, right? No, that's not what I'm proposing at all. You're muted. Oh — I don't know, can you not hear me? I'm not muted on my side. I can hear him; he said that's not what he's proposing at all. Okay. What I'm proposing is — can you hear me now? Yes. Yeah, okay. What I'm proposing is that you inherit the current state of the + +[13:00] flags, whatever they are, right? That's what I'm saying. Yeah — no: they don't get frozen. If I go and change my flags, I inherit the new flags, the way that it works for... No, but — well, if you were saying: right now I'm not auth required, and then in the future I become auth required — well, it's kind of too late. All this stuff is now out in smart and, you know, out of control, basically. Yeah, and there's a great solution to that. You know what you do? You set yourself to auth required, and then you go and claw back everybody's money. Would you really call that a great solution? Yeah, it is a great solution.
Actually, the reason I consider it a great solution is because nobody has their flags set, so this can happen to you right now anyway. Basically, + +[14:00] you know, any issuer can come and just freeze up all your assets whenever they want to, and just say: hey, you have two options — either you are frozen forever, or you accept my new terms of service and you basically do a swap. That's it. Anybody can do that at any time, and the thing I want to make happen is just to have everything that works today also work in smart. That is my number one goal. But the thing is — I mean, going back to this, though — I think it kind of breaks everything in smart if you allow people to just... Take the example of a pool, right — a trading pool. If I change the reserves in some random way, I'm breaking all the invariants of that pool, correct? And so there are two solutions to that problem, both of which I actually laid out in the email I sent to you. The first one is to say: hey, assets that aren't flagged as auth immutable — you can't use them. Basically, you check in advance whether it's good to go, and if it's not good to go, you can't do it. + +[15:00] So in this case it'd be auth immutable, not auth revocable. The other option is you actually have a guard in your contract which checks that the balances are what you expect them to be. So, basically, you track the balances internally — there are only two in a liquidity pool, so it's not a big deal — and then, when you actually start doing work, you check: hey, are the balances on chain equal to the balances that I expect? If not, you know that something was frozen externally, and that's basically the single bad actor poisoning the bank. But in that world, there's a huge incentive for the owner of the contract to go and resolve the problem, to liberate all their other users. Well, then — the owner of the contract — what do you mean? Like, in many cases the contract is just this thing that you want to lock, and that's kind of it; there's no operator, right? I mean, if you want to live in a world where you're using auth-required assets and you're taking + +[16:00] multiple people's money, you'd better have an operator, because those problems can't always be resolved on chain. Like, the standard liquidity pool design would never let you escape that situation. But that's not the expectation in this web3 world — you want to have, like, an AMM, right? You don't want some random operator controlling the internals of the AMM. You might need to: if your contract is immutable, and you accept the funds, and the funds can be frozen, then there's just literally no way to resolve your problems without an operator. Exactly. So that's why it sounds kind of broken to allow that to happen in the first place. I don't think it's broken — you should know better than to do that. The only other option is to prevent that and just say: hey, + +[17:00] these assets are useless. That's what I think we should do. So you think that USDC should be useless? It should be — like, if there is no... yes, it's useless. Okay, great. That is: either you allow people to claw back, or to make random changes to your state, breaking all invariants.
Like, you can't write software that way — I mean, you have to know all those invariants ahead of time, right? What can happen, what are the situations that can happen with this one asset that I can accept? You can't. You have to kind of inherit that in every single smart contract, because you have to make the assumption that, oh yeah, somebody can mess with my, you know, trust lines — or the equivalent of trust lines. That sounds kind of super weird. So let me make a couple of statements here. The first statement is: you can write software that doesn't have this + +[18:00] problem, because you can have the guards on it. Sure — but everybody needs to put in the guards. The second statement is that, on Ethereum, USDC is blacklistable. I've read the contract; it is very clearly blacklistable. In fact, I believe the term they use in the contract actually is that it inherits Blacklistable — so it's right there. And the third thing is: there are liquidity pools that have that functionality nonetheless. Like, you can go and trade USDC in a liquidity pool. So that's an extension to ERC-20, then, that is blacklistable, right? Yeah, it's just additional functionality, right. But then it goes back to, maybe, you know: we are not implementing ERC-20 on the network — we're actually implementing something broader than that. We have to. I don't agree — the interface that those contracts, that Uniswap, is depending on is the ERC-20 interface. + +[19:00] The other stuff is just... That's not true. You're saying that, if — what — you have a liquidity pool with USDC that can... Like, I'm pretty sure — if your assertion is true — if there's a pool somewhere on Ethereum that has USDC, and USDC decides to blacklist the pool or do something crazy with it, the pool is now compromised, basically. It's done. That could be the case — we'd have to go and review the code, but that could be the case. And then there's potentially a cascading effect, right? Like, you know, people may have corrupted this... it basically has all sorts of weird effects. Yeah, I think we'd have to go and confirm whether that's actually the case, but I do believe that is possible. But the following statements are true: USDC is not auth immutable, + +[20:00] USDC has no flags set yet, and it is unlikely we will end up in a world where USDC will set any of the flags — especially immutable, because they'd like to control their future. And specifically, they won't turn revocable off, and we know that because their Ethereum one is revocable. So in this world, you basically are saying: if we don't want to handle that, then USDC should be useless on smart. And then I would question whether we should do this at all. I mean, I don't know — it sounds complicated to have the default be this super complex thing to deal with on the smart side. + +[21:00] I mean, maybe — but that means that we have to expose more than just the ERC-20. We might be able to. I mean, one thing we might be able to do is the clawback inheritance thing, because it's a lot easier to deal with frozen than clawed back. Frozen — yeah, destroys everything. Yeah, well, it doesn't. Again, you can put in the guards, which do exactly the same thing.
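Here is a minimal sketch of the "guard" defense that keeps coming up: the pool tracks its two reserves internally and refuses to operate if the on-chain balances have been changed out from under it by an issuer action. This is illustrative only — the constant-product math, the halt-on-mismatch policy, and all names are assumptions, not something either CAP or any real pool specifies:

```rust
// Toy liquidity pool with a balance guard against external freezes/clawbacks.
struct Pool {
    expected_a: i128,
    expected_b: i128,
}

impl Pool {
    // Called at the top of every entry point, before any work happens.
    fn guard(&self, on_chain_a: i128, on_chain_b: i128) -> Result<(), &'static str> {
        if on_chain_a != self.expected_a || on_chain_b != self.expected_b {
            // Someone external (an issuer action) moved funds; halt rather
            // than let arbitrage drain the remaining honest depositors.
            return Err("reserves changed externally: pool halted");
        }
        Ok(())
    }

    fn swap(
        &mut self,
        on_chain_a: i128,
        on_chain_b: i128,
        amount_in_a: i128,
    ) -> Result<i128, &'static str> {
        self.guard(on_chain_a, on_chain_b)?;
        // Constant-product quote (fees omitted for brevity).
        let out_b = self.expected_b * amount_in_a / (self.expected_a + amount_in_a);
        self.expected_a += amount_in_a;
        self.expected_b -= out_b;
        Ok(out_b)
    }
}

fn main() {
    let mut pool = Pool { expected_a: 100, expected_b: 100 };
    assert!(pool.swap(100, 100, 10).is_ok());
    // An issuer claws 20 B out of the pool behind its back...
    let clawed_b = pool.expected_b - 20;
    // ...and the guard trips on the next interaction instead of misquoting.
    assert!(pool.swap(pool.expected_a, clawed_b, 10).is_err());
}
```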
It's not hard. Right, but then it's still the same thing: you cannot use basically any useful assets... What do you mean? Sorry — you're saying that USDC is not immutable, so it is not... But if we did the clawback inheritance thing, it wouldn't really matter. You write a contract, you can check whether it has clawback enabled, and you don't have to do the clawback guards. And if you try to make a payment, you'll just fail, because you're blacklisted. + +[22:00] Is there a next question we should move on to? Definitely... no. The thing this conversation really shows is that it's going to be very hard for people to write smart contracts that don't break. Because, if we are serious about compliance, and if we don't add compliance into the default thing — or if we don't address this beyond what 48 currently + +[23:00] defines — theoretically, people are still going to want functionality like this for some assets, and so they're going to go and implement it themselves, I guess, in a contract. And then are we going to end up in the same situation? Like, you know, people are going to write a liquidity pool that doesn't handle those edge cases, just like what you were pointing out, John, happens on Ethereum. But to be clear, I don't know that that's true for a fact — I suspect it, right. But I mean, we'll probably end up with something similar, where we have the basic ERC-20, somebody goes and defines a blacklistable or deny-listable interface, some people ignore that, other people code around it. I don't know — it doesn't really sound like we're actually avoiding the problem by not implementing it ourselves. We're just saying that we're not going to provide that interoperability if we don't... + +[24:00] I mean, I don't disagree with you: if we don't provide this functionality and people require it, they will just build it, right? So by deciding to build it or not, we're not actually changing whether the problem exists or not. We're just saying that we're not going to support interoperability with these features of Stellar assets, right? So I think what it comes down to is, like I was saying: what we need to do is use classic assets as kind of the + +[25:00] testing ground for what those additional properties are that you need to expose, so that everybody, from day one, basically handles those edge cases. There's basically no chance we can achieve that. I want to emphasize — it could be as simple as exposing the, like, some... you know, something. I mean, my argument would be: there's no way to globally handle clawback — at least... I'm not saying handle clawback like that. Clawback, I think, requires its own set of protocol — like an actual API. When I say protocol here, I mean: what are the methods that you need to expose as a smart contract in order to support a nice, low-friction clawback? + +[26:00] But I don't think that would be in scope anytime soon. Like, I think, basically, we don't really support clawback in smart. I mean, we don't support it as in, you know: if clawback is enabled, well, first you have to know about it, and then, if it can be enabled, you have to be able to handle the situations where it would happen. Isn't that enough?
Like, as long as a contract can detect whether the asset it is interacting with is clawback-enabled- or sorry, whether the contract's trust line is clawback-enabled, not the asset- then the contract knows whether it can take on that risk, or whether it wants to interact. Because we had the same problem with Starlight, with the payment channels. + +[27:00] Payment channels don't work- or payment channels are not as safe- if the assets can just be clawed back out of the payment channel. You end up in undefined behavior. And when we worked on that, we said: well, that's the risk you take if you do multi-asset channels; you just need to work with the issuer. So are we saying this is a problem just because we expect people to write immutable contracts where there's no issuer you can go to- like, there's no one you can talk to about the problem? Is that the distinction? I mean, I think it would help to have some super concrete examples on the table. Because what's weird is that, even without clawback enabled, there's of course the ultimate clawback, which is that someone could say: we're not going to honor this asset, and we're going to issue some new asset, because something went horribly wrong, right? So the issuer can always + +[28:00] make the token useless. So I think what we're worried about is this- maybe not improbable, but kind of weird- situation where someone does something that changes the rules, but such that we don't believe the asset is worthless, yet we no longer know whom it belongs to. Somehow the attribution of who owns the asset is broken, right? That's what we're worried about. If the asset were worthless- okay, fine, that was the risk we took. But say it's still worth something, and maybe there's a contract with two units of it and two people who are allowed to claim it, and suddenly you've clawed back one, and now the semantics say whoever gets there first gets to claim theirs and the other person doesn't- and we think that's unfair, or something like that. Could you paint some of these scenarios we're concerned about- concretely, what it might look like? I mean, I think one concrete scenario would be: you have a liquidity pool between two USD + +[29:00] fully collateralized stablecoins- call them USDT and USDC. And let's pretend, for the sake of argument, whether it's true or not, that USDT is in fact really, truly collateralized- I'm not making a statement about whether it is or isn't in this context. So let's pretend- how about USDA and USDB? Sure, great: USDA and USDB, and we'll just pretend we know which one's which. And basically, you have this pool, and let's say it's got like 100 million USDA and 100 million USDB, plus or minus epsilon on both sides, because maybe the price is actually 99 cents or 99.9 cents or whatever- who cares. And then the issuer of USDB is like: oh, actually, that was Osama bin Laden- my bad. They get a call from the US government and revoke 25 million dollars- I'm sorry, I said revoke, I mean claw back 25 million dollars. And now the pool is kind of in this super borked state.
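To make this scenario concrete, here is a small TypeScript model of the failure mode- nothing here is a real Stellar or Soroban API; the pool class, its clawback entry point, and the numbers are all hypothetical. The point is that the pool's internal books never learn about the clawback, so early redeemers are paid in full against stale accounting and the shortfall lands entirely on whoever redeems last:

```typescript
// Hypothetical model of a two-asset pool whose internal bookkeeping drifts
// from the real trust-line balances after an external clawback.
type Balances = { usda: number; usdb: number };

class NaivePool {
  // What the contract believes it holds (updated only by its own methods).
  books: Balances = { usda: 100_000_000, usdb: 100_000_000 };
  // What the trust lines actually hold (the ledger's view).
  actual: Balances = { usda: 100_000_000, usdb: 100_000_000 };
  totalShares = 100;

  // A classic clawback reaches in "between transactions": the ledger
  // balance drops, but the contract's books are never told.
  externalClawback(amountUsdb: number) {
    this.actual.usdb -= amountUsdb;
  }

  // Redemption priced off the stale books, capped by what actually remains.
  redeem(shares: number): Balances {
    const frac = shares / this.totalShares;
    const out = {
      usda: Math.min(this.books.usda * frac, this.actual.usda),
      usdb: Math.min(this.books.usdb * frac, this.actual.usdb),
    };
    this.books.usda *= 1 - frac;
    this.books.usdb *= 1 - frac;
    this.actual.usda -= out.usda;
    this.actual.usdb -= out.usdb;
    this.totalShares -= shares;
    return out;
  }
}

const pool = new NaivePool();
pool.externalClawback(25_000_000); // issuer claws back 25M USDB
console.log(pool.redeem(50)); // first LP out: 50M USDA + 50M USDB, made whole
console.log(pool.redeem(50)); // last LP out: 50M USDA + only 25M USDB
```

The guard alternative mentioned in the discussion would have `redeem` compare `books` against `actual` first and refuse to proceed- trading the silent loss for a stuck contract.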
+ +[30:00] Well, presumably what happens is that arbitrageurs would immediately get in there and suck out a bunch of the other asset- destroyed, meaning sucked out by arbitrage- and that's kind of bad. Your only defense against this: you could either say, hey, I checked that this asset was clawbackable and that's a risk you took by putting money into this pool; or you can say, hey, you can't make a pool if the asset is clawbackable; or you can have a guard that checks whether the balances are still what the contract thought they were. Can we step back from mechanism for a second? Suppose that what we can do between any two transactions is whatever we want, right? So pretend we're God for a second: what does the ideal response to this look like? So, first of all- is it the terrorists who have actually invested in the liquidity pool? + +[31:00] What you would really want to do is sort of revoke the assets in proportion. The US government can only, let's say, revoke USDA, not USDB. So what you should do is freeze in proportion to what's been clawed back, so that you keep the proportion of assets the same, right? And ultimately, the right thing is that the terrorists are not allowed to withdraw their money from the liquidity pool, and for everybody else it's kind of business as usual, right? I think that's what- yeah, that's what we do in the current protocol, you know, in our classic protocol today. Exactly: when we have to claw back, we first withdraw the pool shares for that individual. But I don't know if we want to replicate that behavior. Okay, so can we first figure out what we would want in an ideal world, and then figure out whether there are sensible mechanisms that could achieve it? Well, what's your definition of what you want? That's what I'm trying to figure out. + +[32:00] I mean- no, what even is- I guess maybe that's what you're asking: what is even a desirable outcome here? Exactly. I guess, let me describe to you what I would consider the most ideal possible world. Imagine you actually care about clawback- I don't know if we do, who cares, but let's pretend. So, focusing on clawback: the most ideal possible world would probably be that every contract implements an issuer-clawback function, and basically the issuer is the only one who can call that- whatever you want the issuer to be; I'm talking about native, like, classic assets. And the issuer's first port of call is to call that admin function when it wants to do something. And if it does that, the contract hopefully does the right thing. And if the contract doesn't provide that functionality, or doesn't do the right thing, then the issuer just takes whatever action is necessary in order for them to comply, + +[33:00] whatever that means, and damn all who suffer. That's probably the best possible world you could achieve. So let's say that's what we wanted to happen. Okay- so actually, before I continue, does anybody have a better world?
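A back-of-the-envelope sketch of the "freeze in proportion" idea from above- this is just the arithmetic, with hypothetical numbers and names, not a protocol mechanism:

```typescript
// Sketch of the proportional-freeze arithmetic: freeze the same fraction
// of every LP's shares as was clawed back from the pool's reserves, so the
// unfrozen shares remain fully backed at the original exchange rate.
const reserveUsdb = 100_000_000;
const clawedBack = 25_000_000;
const f = clawedBack / reserveUsdb; // 0.25 of the USDB side is gone

const lpShares: Record<string, number> = { alice: 60, bob: 40 };

const frozen = Object.fromEntries(
  Object.entries(lpShares).map(([lp, shares]) => [lp, shares * f]),
);
console.log(frozen); // { alice: 15, bob: 10 } -- locked pending resolution

// The remaining 75 shares redeem against the remaining 75M USDB (and a
// proportional slice of the USDA side), so every holder who wasn't the
// target of the clawback sees business as usual on the unfrozen portion.
```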
Okay, so basically we're adding a new method that you're required to implement- or maybe there's a default- but the sensible way to implement it is that you do whatever should happen; basically, your contract gets to react every time funds are clawed back. No, that's not what I'm saying- oh, sorry. No, I guess- where is this function defined? Is it defined on the contract that owns the asset, or is it defined on every contract that holds the asset? I guess, yeah- it has + +[34:00] to be the contract that holds the asset. Like, the pool has to implement that thing. Yeah. So the question is- we could do this- the question is: is there a sensible default? Do we force everybody to implement this? But then that's a barrier to deploying your smart contract, because you have to write more code. Or is there some kind of default- like, the default is frozen until it gets upgraded, or something? I don't know. I think the default should be that you can basically opt out of this whole thing. You can basically say: I don't want to deal with assets that have those types of properties. I see- so if you haven't implemented this function, then you just can't hold certain assets. No, I totally don't think this makes any sense, because I can implement this function and just assert- that could be my whole implementation. That's what I mean. So the assert basically says: freeze the smart contract as soon as any funds have been clawed back from it. + +[35:00] Sorry, I cut you off there. Yeah, I think David is probably right: in the case of a pool, you probably don't want to have an arbitrary amount taken out from under your feet, because this breaks all your invariants. Instead, what do you want to happen? Probably, in addition, the contract gets completely stopped. The default- I'm talking about the default. Yeah, so the default is that it should stop the contract, but it should also grant someone permission to upgrade the contract or something, right? I mean, basically, what we need is human intervention, right? We did this thing assuming that these dollars were fine, and suddenly the rules have completely changed. So of + +[36:00] course, we could just lock all the assets in perpetuity, or we could figure out some way to arbitrate the situation, which might require human judgment, because this wasn't something we just put, like, a 20-digit proof-of-work unlock code on. So I want to go back to something that you said, David- like, if you don't define this function, then you don't support these types of assets. I think we're talking about two different things here. There's contracts deciding if they want to use an asset, and they can do that if we provide a way for them to just read their own trust line: does my trust line have clawback enabled? Simple function, simple flag. No, but I'm saying, if a contract has any trust line that gets clawed back, then there has to be- then there's a function to kind of deal with that. Like a callback? Yes. So I think what we're talking about is hooks, or + +[37:00] the idea of hooking into an event that's happening.
Rather than having a function that does the thing that needs to happen, we just want a function that notifies the contract: this is happening, whether you like it or not, because you decided to hold a clawbackable asset. So once you hold it, you can't get out of this happening- you just get told when it happens. So contracts don't have to provide that function; if they don't care that it happens, it'll just happen, and the consequences will be whatever they are. But if you care about it, and you maybe want to mutate your internal state or update something, then you have the option to do that. But I guess what we're anticipating is that there are a bunch of situations where people write contracts without any possible idea- they just can't imagine that USDC would ever try to claw back their asset- and yet, two years later, USDC sets the clawback- whatever + +[38:00] it's called- clawback-enabled flag on their account and actually claws something back, right? So, basically, though- clawback is designed so that once you have a trust line, clawback can't get enabled on it. So I mean, we've already designed around that. I think the other thing we can do is make the SDK- or however you're writing contracts- so that when the contract says, I'm willing to hold an asset- and I'm assuming we're going to have some way of saying that, like on classic- you have to explicitly say that you're going to allow the asset to be clawbackable. And then maybe that call fails if you just haven't passed that flag in and the asset is clawbackable. So then, coming back- I think, John, your example is not so scary then, because in your example, either USDA or USDB already had to have clawback enabled, right? So is there some other example that's more scary? So actually, just to be clear, + +[39:00] the proposal I wrote was actually based off of the current flag, so things could become clawbackable later. We don't have to do it like that. But they can't become clawbackable later for an existing trust line- well, that's how the current stuff works. That's not what my proposal specified, at least not [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md). [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md), I guess, does work like that by default. It doesn't have to be like that- we could make it all work like this; it's just a little extra complexity to deal with some stuff, but it might save complexity somewhere else. But the thing that I'm skeptical about here is the notion of a callback. You probably don't want to do callbacks. It could be that the contract is stuck until someone calls the cleanup-clawback method or something. But my argument would really be to implement this the whole opposite way, + +[40:00] which is basically: hey, we have a strong recommendation for you to implement this function, and if you implement it, we encourage issuers to use it. There is no promise that they will, because at the end of the day, if the issuer needs to do something unilaterally, they should be able to. Sure, but why should this be up to the issuers?
I guess what I'm saying is that the invocation should be a result of something happening on classic, not of the issuer doing some kind of smart contract thing. No, it definitely has to be a smart contract thing- first of all, because the issuer definitely should not be paying gas to clean up other people's smart contracts that did not anticipate something. Well, that raises a problem, then: if I write my hook- my callback- to use some obscene amount of gas, who's going to pay for that? That's the point: somebody who + +[41:00] cares about that contract. Oh well, you're saying if it's called at a different time- I mean, at the end of the day, the callback could always fail, right? Even if you implement it and you think it won't fail, it could still. For example, consider a USDA-USDB liquidity pool: what does a clawback on Stellar do? Well, it returns both assets. Well, what if the other asset were already frozen? Well then, the operation would fail. So, look- at the end of the day, there has to be support for a unilateral clawback anyway, and the contract has to handle that. And in a world where there is a unilateral clawback, we might as well just say that the contract should supply a function and the issuer should call it- but you have to deal with it if they don't, anyway. Can anybody call this function? + +[42:00] I would say it should have a guard on the issuer, type of thing. So basically, it should check: hey, is the person calling this the person who's allowed to call it? I mean, presumably what it should do is examine all the trust lines and, if they're all in the expected state, not do anything- and otherwise, clean up the mess. Yeah, I feel like what we're describing is some more general function that just says: something might be wrong- go check your state, and figure it out if it's not right. We're not actually talking about callbacks specifically; we're more talking about how to get a contract unstuck when it gets into a bad state. Sure, yeah- but that's way harder. If you're in a generically borked state, there's only one solution: human intervention. That's the only possible solution if you're generically borked. Because- in what way are you borked? Only a human can determine that, unless you've written a program that can analyze + +[43:00] all the possible states you could be in. And since that sounds generally hard, and generally not feasible in a gas- or resource-constrained world, there's no hope of that. Okay, so the distinction of the function you're talking about is that, because only the issuer can call this- because there's a guard for that- me, writing a contract, I can trust the issuer will only call this if they are in fact doing this thing. I'm very clearly saying there are no protocol specifications here at all. The only thing I'm saying is that it's a recommendation to people who write contracts: they should provide a function that implements clawback if they would like to hold clawbackable assets, right? But as the contract author, I need to be thinking about where my trust lies.
So if this function is going to mutate the internal state of my contract, I've got to be trusting the issuer to only call it when this is actually happening. I don't necessarily know what you mean- the function should be implemented + +[44:00] in such a way that it leaves your contract in a sane state. So if you want it to only be callable by the issuer- which would be the sane thing to do- you should check that, but you don't have to. In theory, you could let anybody call it on your contract; it just sucks. You do not want it to be callable only by the issuer, right? The issuer is not, in general, going to know about every smart contract using their asset. Well, you want it to be callable by anyone, but to only do anything if something unexpected is happening in the state of your trust lines, right? So you basically want an am-I-borked check at the beginning of most of your methods, right? And then you want a clean-up-the-mess method that anybody can call, to clean up the thing that's causing all the other methods to fail. In John's version, the issuer is clawing back from the contract's account, and so the issuer knows about the contract in this case, because they're clawing it back. That's why they know to call that function on + +[45:00] the contract. And I mean- clearly, why do they care? Maybe the issuer only does classic, you know. Does USDC care about smart contracts? They're there to provide an asset that has a well-defined redemption path, and that's fine. They're not here to parse people's smart contracts and spend a bunch of gas doing whatever, right? So I feel like what we need is just a way for people who care about a smart contract to clean up the mess when the smart contract's assumptions about assets- or about trust lines- are no longer correct. And in theory- you know, we've been talking about mutable contracts- even if a contract is in the worst possible state, somebody can upload a new version of that contract to fix it. Only if somebody can actually do so, right? Nico protested + +[46:00] against my notion that these contracts would even be mutable. If they're mutable, there's no problem. In the world where the contracts are mutable, and somebody actually has the authority to mutate them- whether that is an organization, an entity, or a person- there's no problem. The accounts get frozen, or the accounts get clawed back, and the owner of that contract just calls up the issuer of the asset and says: hey, how do we fix this problem? Oh, this is how we need to mutate the state. Cool, I'll write the function to do that. I mean, one can imagine wanting to set up a contract that you can only change when it gets borked, or something. Like, maybe there's value in: yes, if suddenly USDC gets clawed back, then I get the ability to upgrade this contract, but usually I don't want this ability, because the contract should be its own logic. That would be easy to write, though- you could write that contract. That's not hard; you can do that without any magic at all. So, + +[47:00] I don't know, I feel like we're not all talking about the same thing here, and I think that's why we're not agreeing on stuff. Like, what kinds of clawbacks-
Are there other types of edge cases that this would apply to? Or is it just- sorry- classic clawbacks: a clawback-enabled asset where the issuer invokes a classic clawback, right? Are there other state changes that could happen on classic that would lead to this situation, or is this limited to clawbacks? I mean, getting frozen would be not as complicated, but equally bad. So, an auth-required token where you deauthorize the trust line. So there are the other auth flags. Because it's also like- can we just say: hey, no clawback-enabled assets in smart contracts; you can't use them? + +[48:00] Maybe. I mean, here's a way you could implement this if we wanted that: in the [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) world, you can only create a trust line for a smart contract to an asset if it won't end up clawbackable- basically, the operation of creating the trust line would fail if you would end up with the clawback-enabled flag on your trust line, and then you just never end up in this world. Yeah, I think we should just make it a choice. You know, you call create-trustline with no options, and that's the case- it just fails if the asset is clawback-enabled- or there's an additional flag or option you can pass that says: I'm willing to go the potentially unsafe route of having a trust line that's clawback + +[49:00] enabled. And then at least it's surfaced to the developer- it's in their face- something like the sketch below. It's the same thing we were talking about with re-entrancy: yeah, surface it, so they know about it. David, on the context of the cleanup method: I think the big issue with the cleanup method is that there are worlds you can enter where you don't know how to clean up. Suppose that an issuer claws back from your contract- do you know why they did that? So how do you fix the state of your contract? Well, I'm thinking what the cleanup method could do is add a signing key, or somehow enable the author of the contract to upgrade it, for example- or it goes into safe mode or something, and then unlocks some kind of maintenance. I think that's one of the patterns that some smart contracts already have- even the ones that have, like, a DAO. You know, you can basically go into this + +[50:00] maintenance mode; it just takes like a week or something- it's not instant. There was a bug a few months ago, actually, on- I think- a bridge protocol or something, where it was not a week, it was like a second instead. But yeah, I don't know. Personally, I just feel like we're making this really complicated. At the end of the day, I feel like usability should be front of mind for developers, and if we make this complicated, nobody's going to use it, and then we've failed, right? So- can I say- the point is, we would need to have a default for this method, right?
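A sketch of that opt-in, with entirely hypothetical SDK names- the point is only that the unsafe case requires an explicit flag from the developer:

```typescript
// Hypothetical SDK surface: creating a contract trust line fails by
// default when the trust line would end up clawback-enabled, unless the
// caller explicitly opts in to that risk.
type TrustlineOpts = { allowClawbackEnabled?: boolean };

function createTrustline(
  assetIsClawbackEnabled: boolean,
  opts: TrustlineOpts = {},
): void {
  if (assetIsClawbackEnabled && !opts.allowClawbackEnabled) {
    throw new Error(
      "asset is clawback-enabled; pass allowClawbackEnabled: true to accept the risk",
    );
  }
  // ...proceed to create the trust line.
}

createTrustline(true, { allowClawbackEnabled: true }); // explicit opt-in
```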
So we're not saying everybody has to implement a method, but basically, the problem is that people are going to make assumptions about trust lines, and every once in a while these assumptions may + +[51:00] break, in a way that would cause really weird, unspecified behavior of the smart contract. So should we have some default thing where, by default, the contract enters maintenance mode or whatever when these assumptions are broken? I mean, that assumes we impose a maintenance mode onto the entire world, and I just don't think we should. If you want that behavior- well, you said assert: what happens when an assert fails? Are you seriously thinking about having an assert? Well, what I meant about the assert was- I was actually giving you a pathological case where what you were proposing wouldn't work for the cleanup. Oh- it just asserts and fails; it does nothing, you cannot do it. It's like: what good did that do you? Right- for an assert to have an effect, you also need to write that condition in the first place. No, I'm saying, like, assert false. Oh, I see. Yeah. + +[52:00] No, but I was even thinking- even if you have all the defaults and all that stuff, you have to write your invariants in a way such that you can handle- such that you can know, basically, that you are blocked- and that sounds kind of not workable. Well, I guess my proposal would basically be that any time a third party forcibly does something to one of your trust lines, you get into a state where someone has to call this cleanup function. If you haven't implemented the cleanup function, then nothing can happen to your contract. That just sounds undesirable to me. But maybe the alternative sounds pretty undesirable too, which is that a bunch of arbitrageurs go in and suck out all the money that's there, by accident. No, my proposal is just that if you're a contract developer, you should handle it- figure out what you're saying you can handle. But how do we know people are going to + +[53:00] do this right? We don't. And if they don't, then it's the same thing as any other smart contract bug. So when they don't, do we want people to lose money to arbitrageurs, or do we want the contract to freeze, and people have to debate how to clean up the mess? What if it can't be cleaned up? What if there's nobody who owns the contract? That was the case Nico was talking about: if nobody owns the contract, nobody can clean it up, and now the money's just locked up forever. Well then, maybe the default implementation of this function is that it hands signing authority back to whichever account created this market. What if they don't have a key anymore? Yeah, then everybody loses their money. Well, that's the world Nico was talking about, with the contract being immutable. Again, if the contract is mutable, there's just not really a problem here. Well, it is a problem if everybody loses their money. No- in the world where the contracts are mutable and people can actually perform the mutation, there's not really a problem, because people can just come through and clean up the mess.
No, sorry, they can't clean up the mess + +[54:00] if arbitrageurs have sucked out a bunch of money. That's why you should handle the guarding conditions that you need, if you do need them. Well, I feel like we're out of time for this discussion here today, but it seems like there's a lot. I mean, I think we've just got to keep working async on the dev discussion mailing list and on the Discord, and reconvene here next week. I don't know what the next steps are, though. I mean, the thing I wanted to say is: for the first phase of this, I would do kind of what Leigh was saying, which is: we don't support clawback, right? If you have this on your trust line- for those classic things, I'm talking about, for the first version- you can't do anything, right; you can't deploy a contract, basically, that uses that. And then, + +[55:00] in parallel or later, we can figure this out globally- like, how can we make clawback work in the smart world, and then retrofit it to classic? I think the main lines of work should be- the first one should be: let's go and figure out what happens with these kinds of contracts on Ethereum. These problems exist; people are doing something. That's why they don't, though- right, because the one thing that's different here is that the classic world is going to kind of reach in and mess with the smart contract world in a way that can't happen on Ethereum. That's not true. I literally told you that USDC has an auth-revoke function- it has revocation, so- yeah, you can revoke. I don't know if they have- do they have a clawback? I don't think so, because that's the one that really breaks everything. I mean, they both break everything, but clawback somewhat more. See you next week. Bye, + +[56:00] David- see you next week. What people are doing, yeah, and how they're handling it, and then we should resume this. But again, I'm in favor of keeping it simple. I know there are theoretical possibilities and edge cases, but I think it's also worth pointing out that I don't know that there are any active clawback-enabled assets on classic right now, just FYI. I agree with that. Yeah, all right- see everybody next week.
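One possible shape for the guard-plus-cleanup pattern discussed above, sketched in plain TypeScript. This is an illustration under assumptions, not a real Stellar or Soroban API: the `Ledger` interface and `trustLineBalance` host function are hypothetical stand-ins for whatever would expose the contract's real trust-line balances. The contract keeps its own books, runs an "am I borked?" check at the top of every state-changing method, and exposes a cleanup entry point anyone may call to park the contract until a human resolves the discrepancy:

```typescript
// Sketch of a guard-plus-cleanup pattern for a contract holding assets
// that a third party (the issuer) can freeze or claw back.
interface Ledger {
  trustLineBalance(asset: string): number; // hypothetical host function
}

class GuardedContract {
  private books = new Map<string, number>(); // what we think we hold
  private maintenance = false;

  constructor(private ledger: Ledger, assets: string[]) {
    for (const asset of assets) {
      this.books.set(asset, this.ledger.trustLineBalance(asset));
    }
  }

  // "Am I borked?" guard, run at the start of every state-changing method.
  private assertNotBorked(): void {
    if (this.maintenance) throw new Error("in maintenance mode");
    for (const [asset, expected] of this.books) {
      if (this.ledger.trustLineBalance(asset) !== expected) {
        throw new Error(`unexpected ${asset} balance: call cleanUp()`);
      }
    }
  }

  deposit(asset: string, amount: number): void {
    this.assertNotBorked();
    // ...receive the funds, then keep the books in sync with the ledger.
    this.books.set(asset, (this.books.get(asset) ?? 0) + amount);
  }

  // Anybody may call this; it only does something if the ledger really
  // disagrees with the books. Here the "something" is entering maintenance
  // mode -- stopping the contract and handing the mess to a human, rather
  // than letting arbitrageurs drain it against stale books.
  cleanUp(): void {
    for (const [asset, expected] of this.books) {
      if (this.ledger.trustLineBalance(asset) !== expected) {
        this.maintenance = true;
        return;
      }
    }
  }
}
```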
diff --git a/meetings/2022-05-17.mdx b/meetings/2022-05-17.mdx new file mode 100644 index 0000000000..b6963788d2 --- /dev/null +++ b/meetings/2022-05-17.mdx @@ -0,0 +1,159 @@ +--- +title: "Innovating Colombia's Financial System" +description: "A community panel exploring how blockchain, anchors, and stablecoins can modernize the U.S.–Colombia payment corridor, reduce remittance costs, and expand access to inclusive financial services through compliant, education-first approaches." +authors: + - amit-sharma + - diana-mejia + - gabriel-bizma + - ivan-mudryj +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session examines a pilot payment corridor between the United States and Colombia built on the Stellar network, with a focus on anchor services, compliance, and real-world adoption. Panelists from Anclap, Finclusive, and CAF (the Development Bank of Latin America) discuss how blockchain and stablecoins can lower cross-border payment costs, improve settlement speed, and support both household remittances and small business transactions. + +Beyond infrastructure, the conversation emphasizes the importance of financial education, digital literacy, and regulator collaboration. Speakers highlight how responsible design—combining AML/CFT controls, consumer protection, and behavioral approaches to financial education—can help users move from informal cash-based systems into the formal financial economy. + +### Key Topics + +- Using blockchain and stablecoins to reduce the cost and friction of U.S.–Colombia remittances. +- The role of anchors in providing compliant on- and off-ramps between fiat currencies and Stellar-based assets. +- How AML, KYC/KYB, and clawback features support trust and regulatory alignment. +- Benefits for small businesses, freelancers, and SMEs engaging in cross-border and B2B payments. +- Infrastructure-first approaches that enable other fintechs to build services on top of Stellar rails. +- Collaboration with regulators through sandboxes to responsibly deploy new payment models. +- Embedding financial and digital education into consumer experiences to drive real adoption. +- Broader use cases beyond remittances, including local payments, SME financing, and humanitarian aid. + +### Resources + +- [Stellar Network Overview](https://www.stellar.org/learn/intro-to-stellar) +- [Anchors on Stellar](/docs/learn/fundamentals/anchors) +- [USDC on Stellar](https://www.stellar.org/usdc) +- [Stellar and Financial Inclusion](https://www.stellar.org/impact/financial-inclusion) + +
+ Video Transcript + +[01:00] Okay, I think that we can start. Good morning, everyone in Latin America. Welcome to today's panel, which we are calling Innovating Colombia's Financial System. I'm Gabriel Bizama; I lead international policy at the Stellar Development Foundation, and today we have three experienced panelists with whom we are going to discuss the opportunities and challenges of using blockchain technology for cross-border payments. Specifically, what we are going to + +[02:00] discuss is how the use of blockchain technology and stablecoins can expand the access and usage of affordable financial services, and how they could have a positive impact on inclusion and the economy by contributing to reducing the cost of cross-border payments. I think everyone connected to this panel knows that the average cost of cross-border payments remains very high right now: it amounts to almost six percent of the amount you send, which is more than double the Sustainable Development Goals target, which is three percent. This high cost of remittance payments has real-world impacts on individuals and small businesses. For example, high costs impact migrants seeking to send money home to their + +[03:00] families, small businesses importing products from foreign suppliers, and non-profit organizations, such as aid organizations, that are disbursing funds for humanitarian aid. So blockchain technology and the responsible design and use of stablecoins could enable more affordable cross-border payments- including remittances, but also B2B payments- that finalize in seconds and cost fractions of a cent, mainly due to the reduction of intermediation. Today we are going to get into the details of a project that we are doing in Colombia with two ecosystem partners of the Stellar Development Foundation, and we will also have Diana from the Latin American development bank speak a little bit about how to embed financial education into the consumer experience. But + +[04:00] before getting into the details and discussions with the panelists, I just want to give you a brief overview and intro to the Stellar network for those who are not familiar with it. Stellar is an open blockchain with control capabilities for issuers. What does this mean? It is a network that, on the one hand, gives asset issuers controls that ensure the safety and security of a permissioned or closed network but, at the same time, leverages openness and interoperability that foster competition, innovation, and inclusion. Some clear examples of this are the AML and CFT features and clawbacks. So, something called authorization required + +[05:00] is a flag: when an issuer issues an asset, it can make sure that it has this AML and CFT feature, which ensures that a financial entity distributing the asset identifies who the holder of that asset is, and this gives the issuer controls to ensure that AML and CFT regulations are being complied with. So we think that AML and CFT are important, but clawbacks are also important. If an asset issuer- such as a regulated financial entity, a fintech, a digital wallet, or even a central bank that issues a CBDC- includes this flag on the asset, it gives the issuer the possibility to recover the funds in the event of fraud, for example.
But this + +[06:00] is not only relevant in the case of fraudulent transactions; it is also relevant if, for example, a user loses their private keys. So we think that clawbacks are really important. And just to step back a little bit and give some data on Stellar: it was established in 2014, so it's actually one of the most established blockchains, and it was designed for payments. The main use case right now on Stellar is cross-border payments, but we are also working on other use cases, such as CBDCs and using blockchain for distributing humanitarian aid. Since it launched, the network has processed more than 2.6 billion transactions, and it is currently processing more than six million transactions on a daily basis. The + +[07:00] other piece that is important: if you would like to promote the use of blockchain technology that could also promote financial inclusion, we need to make sure that the technology is sustainable. So one of the things that we would like to make sure of is that the technology doesn't consume a lot of energy. In the next couple of weeks, we are going to publish some research on the energy consumption of Stellar, and the data is really promising, so that is something we are really happy about. And the network is supported by the Stellar Development Foundation, whose mission is to promote the access to and use of financial services. The foundation is a non-profit organization, but it's not a charity- it actually pays taxes in the U.S., specifically in California- and the whole goal + +[08:00] of the foundation is to support the development of the network. So with that, I think we can start. But before starting: this panel is a lead-up to the Techstars Startup Weekend Blockchain Mexico City, which is going to happen next weekend. This is an event that brings together the community in Latin America that is building solutions in finance- not only on Stellar, but on any blockchain. So we are inviting those developers, startups, fintechs, and others that are building products in Latin America. If you have a good idea to solve any problem for the target audience that we work with in terms of payments, you are more than invited to attend the hackathon in person, or even virtually. + +[09:00] There you have the link to sign up, so feel free to do it, and if you have any questions, you can ask me, or Anki, after this panel. So with that, I think we are now ready to start the discussion. First of all, I would ask Ivan to introduce himself and provide a brief overview of Anclap and what it is doing in Latin America, and specifically in Colombia. Hey, Gabriel- how are you all? Thank you for having us. Yeah, I work at Anclap, here in Latin America. We are connecting all Latin American countries to the Stellar network through + +[10:00] stablecoins. We issue stablecoins for every Latin American currency, so people can access these global payment networks- not only people, but also any platform or business that needs, or finds, an opportunity here to transact worldwide, at high speed and almost for free- it's really fast. We started in Argentina, we are working in Colombia and also in Peru, and on our roadmap is to work on other countries within the Latin America region. So that is what we do.
That is what + +[11:00] we do: we are connecting every single Latin American country to this global payment network. Thank you very much for that, Ivan. We are going to get into the details of what Anclap is doing specifically in Colombia, but thank you very much for the intro. Amit, do you want to introduce yourself and provide a brief overview of FinClusive and its products? Absolutely- thank you very much for having us, and it's great to be here with Ivan and the Stellar team as well. We're really excited about pursuing the partnership to connect LatAm countries in the cross-border payments space. We're FinClusive- you can see from our name that we are all about financial inclusion, and we share that mission with Anclap, Stellar, and the ecosystem. We are a hybrid fintech and regtech platform, where we provide a full-stack financial crimes compliance, or anti-money-laundering, platform that enables all of the essential know-your-customer, know-your-business, anti-money-laundering, + +[12:00] anti-fraud controls, transaction monitoring, and the like in one workflow, and it's built to serve both traditional financial services companies' compliance needs as well as decentralized financial services, including blockchain ecosystems like the Stellar network. We enable a number of other anchors across the platform globally to ensure that there's a consistent approach to know-your-customer and anti-money-laundering with our global framework. You can think of us as an orchestration layer that brings under one roof everything from identity verification and validation to legal entity verification and validation across the globe, as other anchors and nodes on the Stellar blockchain originate their clients- either retail individuals or business clients- that need to have these essential KYC/KYB elements in place, along with the associated transaction monitoring and other pieces of the anti-money-laundering stack. Importantly, we are + +[13:00] very excited about our partnership with Stellar, because they recognize that there is a need for consistency of approach in the anti-money-laundering framework, as many jurisdictions have different governing rules. We are built to the global standards of the Financial Action Task Force, and many in our company are former global regulators, whether with the U.S. or others. For my part, for example, I spent the early part of my career at the U.S. Treasury Department, working with institutions like the FATF on anti-money-laundering frameworks. So the advent of the Stellar ecosystem, and blockchain-enabled tools that facilitate a much faster, more efficient cross-border payments ecosystem, is exciting in that regard- but it all needs compliance. We are therefore also an anchor on the Stellar blockchain, and on the fintech side we provide connectivity for those payments into the U.S. traditional banking and payments ecosystem- ACH payments, wire payments, and the like- so we can connect with Ivan's team and the customers + +[14:00] they are working with to facilitate cross-border payments for household remittances, small business payments, and the like, and enable on- and off-ramping for assets that are transacted on the Stellar blockchain- for example, USDC that may come inbound to counterparts to be paid in the U.S., where we can then off-ramp to the U.S. payment system and push or pull funds using ACH or wires- as well as outbound flows.
So we perform two key functions: we're a gateway to the traditional financial and banking ecosystems in the United States- able to hold funds in accounts with our partner banks in the U.S. and access those ACH and wire rails- and we're able to on- and off-ramp payments using the global Stellar blockchain ecosystem, working with partners like Ivan to really connect the Latin American business and household community with their cross-border payments needs. And by doing so, we enable a very consistent approach, by also providing some proprietary capabilities around compliance-backed digital identity credentials for clients that + +[15:00] come into the ecosystem. That provides great efficiencies when we do KYC support, because these checks are often very redundant. So we can issue and validate compliance-backed verifiable digital credentials for clients within that ecosystem, so that, as different nodes originate their clients, they can have one KYC gateway and have a utility in the ecosystem while, importantly, also protecting their privacy. And so the combination of these compliance tools with the blockchain elements that associate payment flows between digital assets and fiat assets, with this on- and off-ramp capability, is where we sit, and we're excited about participating in growing the ecosystem and really driving cross-border payments in a much more efficient, but also secure and compliant, way. Thank you very much for that, Amit. FinClusive is a really good example of how financial entities are connected to Stellar. + +[16:00] I personally care a lot about AML, CFT, and compliance, so I'm really happy to hear everything that Amit is going to tell us today. So, moving forward: Diana, do you want to introduce yourself and provide a brief overview of CAF? I think most of the audience will know CAF, but it would be great to hear it from you. Yeah, thank you, Gabriel. Thank you for the invitation to participate in the webinar. CAF is the development bank of Latin America, and we are committed to improving the quality of life of all Latin Americans and Caribbeans. CAF's actions promote sustainable development and the integration of the region, and we provide financial support + +[17:00] and consulting services to both the public and private sectors in our shareholder countries, which are 20 countries: 18 countries in Latin America and the Caribbean, plus Spain and Portugal. In addition, we generate knowledge to strengthen public policies in Latin America and the Caribbean, in order to improve the quality and impact of the projects that we promote. I work as the coordinator of the financial inclusion program, which is a program within the private sector efforts that CAF is working on with all the countries in Latin America and the Caribbean. Thank you very much for that brief overview, + +[18:00] Diana. It's a pleasure having you here as well, and we're making sure that you are involved in the project from a financial inclusion and also a financial education standpoint- that is something that is really important for us. So, after these quick introductions, I think we can start with the panel and get into the details of the project that we are doing in Colombia.
And just to give you a brief overview: what we are doing with Anclap and FinClusive, under a pilot project, is trying to gather data on the cost of sending funds across borders using the Stellar blockchain, and we are really happy with the results so far in terms of the pricing and the potential savings that someone sending money from the U.S. and someone receiving money in Colombia + +[19:00] can realize by using blockchain. So, with this background, I have the first question for you, Amit: what do you think are the benefits and opportunities of using blockchain and stablecoins for cross-border payments, for someone who is living in the U.S. and wants to send money to Latin America- for example, to Colombia? Thank you very much for the question. It really comes down to three core benefits: cost, efficiency, and security, right? The ability to leverage stablecoins like USDC over the Stellar blockchain enables near-real-time payments between anchors on the ecosystem, and those anchors connect directly to their clients, and those clients can be individuals that hold + +[20:00] wallets, operating with a native application from a fintech provider in their home country- like FinClusive is doing for businesses and others in the U.S., and Anclap is doing across LatAm. The ability to on-ramp to USDC from local fiat currencies- or, quite frankly, any other asset that's issued and transacted on Stellar, whether stablecoins or altcoins- provides a tremendous amount of efficiency. You can go from fiat to stablecoin back to fiat in many ways much faster than you can send an international wire payment between two intermediaries that are representing multiple banks or others between the ultimate client sender and the ultimate beneficiary recipient, and so you get both the speed and the efficiency associated with that. The cost efficiencies that are thereby enabled can streamline this, because when you go through multiple + +[21:00] intermediaries- multiple correspondent banks, with three-, five-, seven-plus-day settlement times- you not only lose time on what may be a small-dollar payment for a household, or even a large-value payment for a non-profit that is trying to support organizations in need of humanitarian relief, or corporates and legal entities trying to make payroll payments, supply chain payments, vendor payments, and others. You get a speed and efficiency as well as a cost advantage. Lastly, and importantly, the blockchain's underlying attributes give us certain capabilities from a security and compliance perspective, and the commitment to risk and compliance within the context of the Stellar ecosystem benefits from that. Because we provide the essential KYC/KYB- know-your-customer, know-your-business- elements and therefore issue unique digital credentials, what that enables is that, in the individual context in those countries, when they onboard a client- + +[22:00] an individual- and they go through the identity verification, documentary and non-documentary background, and sanctions screening processes- many of these are very piecemeal, and we can consolidate them through the FinClusive ecosystem. We can issue these credentials, and now any node that needs to validate an individual or a business that's been onboarded and run through that KYC/KYB process can do so near-instantaneously.
And that addresses a significant hurdle, both from an efficiency and a cost perspective, because anti-money-laundering elements tend to be expensive and tend to be very redundant. So this provides an answer to the redundant applications of AML and KYC, while also creating security by enabling these verifiable credentials that protect underlying privacy information. Whereas in the traditional financial services context, when a sender sends money through multiple intermediaries to a recipient in a foreign country, you have to disclose underlying account and personal + +[23:00] information through that payment chain, one is now able to do so in adherence to those compliance rules without having to attach the underlying private information. And because the ledger is immutable, you can search transactions from an anti-fraud and anti-money-laundering perspective, as well as ensure that there's no illicit activity happening in the system. So the immutability of the ledger and the transparency it provides, while also creating a privacy protection layer, is inherently part and parcel of blockchain technology when combined in this ecosystem- through the Stellar Development Foundation's support of the Stellar network, and companies like Anclap and FinClusive. Now you get connectivity between asset issuance and payments, and interconnectivity between stablecoins and fiat currencies in multiple markets, with the KYC and anti-money-laundering layer. So the net result is you get security, speed, and efficiency. That's great. + +[24:00] From a Colombian perspective- so, for someone who is receiving funds from the U.S. in Colombia- Ivan, what do you think are the main benefits of using blockchain and stablecoins for cross-border payments? Ah, the benefits- Amit has covered a lot of the benefits, and I deeply agree with what he said. But I see that this technology- the on- and off-ramp services that we provide, the whole ecosystem, and the protection tools that FinClusive is bringing to the network- allows not only money to move for remittances, but also small businesses to transact worldwide. I mean, this is technology that + +[25:00] not only allows us to help those people that don't have access to financial services, or that pay too much to get money from abroad, but also small businesses, so they can start offering their products and services to other countries. In Colombia- like in other countries of the region, Argentina and others- the necessity is clear: because of the lack of formal jobs, there is a huge cash network, and that's also something we should work on, because it's something that hurts + +[26:00] people's economy. Maybe, if you don't have enough financial education- and that is where we want to work here- you don't realize how much you are losing by using traditional rails, or maybe you are just used to those rails. But using this technology, using this network, could benefit people- not only to get money from a relative in the U.S.
faster and cheaper, but also to start working on their personal economy- I mean, how to use money, how to save, how + +[27:00] you could offer your personal services. You could be, I don't know, a live streamer, a gamer- these blockchain rails allow people to start transacting worldwide, but using our domestic currency, or the currency you want. And at the end of the day, when you need to consume and use local financial services- for local payments or whatever you need- there is a way to interact between the digital network and the local financial system. That is what we do: we represent the local currency and allow people to move from one + +[28:00] world to the other easily. So I'm pretty sure this is the next chapter in our region when talking about financial services and financial opportunities for people. But I also want to highlight the benefits that small businesses, freelancers, or self-employed people could get from this entire network- not only people who are receiving money from abroad, but also developing your economy, integrating yourself, and providing your services worldwide. And I think that is + +[29:00] huge. And I'm sure we have to work really hard on financial education- that is a must-have here- but this technology and this ecosystem have a lot to do with people, and as soon as people start seeing and feeling the benefits of this network, sooner or later they will become more familiar with it. Of course, that is a process, and we are starting by trying to demonstrate that using these blockchain rails for transacting, for using your money, and for accessing financial services is the first step toward huge other opportunities in the next few + +[30:00] years. This is really interesting- thank you very much for that, Ivan. You touched upon different issues that I would like to ask Diana about, in terms of personal finance and financial education. We have spoken a lot about benefits and opportunities, but in this context, in order to send money- for example, from the U.S.
to Colombia- you maybe need a smartphone, you need connectivity. How do you think financial education, financial literacy, and also digital literacy are relevant for someone who is receiving money in Colombia, and how would you approach financial education in this context? Yes, thank you, Gabriel. I think + +[31:00] that we first need to think about what financial education is. Traditionally, financial education programs have been built on the assumption that transferring information to financial consumers about how to use financial products would lead to better choices and improved financial health. However, evidence from numerous impact evaluations over the past decade suggests that this assumption is flawed, and there is a growing body of evidence showing that interventions designed using principles from behavioral economics are more likely to improve people's money management behavior than other interventions. We at CAF are working on programs on the improvement of + +[32:00] financial inclusion, and we see that this requires financially capable customers who actively use products for their benefit. The ultimate goal of financial education programs is to help develop financially capable customers: those with the attitudes, the knowledge, the skills, and the behaviors to make financial decisions that improve their lives. There is also evidence showing that higher levels of financial capability are related to greater levels of financial well-being, or financial health, through financial attitudes, skills, and behaviors- such as savings behavior, previous experience with the financial sector, comparison among different financial institutions before acquiring a new financial product, and personal participation or involvement + +[33:00] in the financial decisions of the household, among others. So we see that, for example, this project could benefit a lot from incorporating financial education from this behavioral perspective- I mean, taking into account the customer's needs and their behavior, and incorporating the kinds of nudges that will help the customer, or the remittance receiver, to improve their savings behavior, for example. So we see this as a very important opportunity to incorporate this financial education evidence into this product in Colombia. Yeah, I totally + +[34:00] agree with that. You need to make sure that when you approach financial education, you can help individuals and adults improve their financial capabilities- and therefore their financial health- by changing behavior. I think that is the approach the evidence points us to, and I'm happy to hear that the Latin American development bank is working on that as well and can also contribute to this project. So, moving forward, in terms of not only what is going on in Colombia but also what is going on in the region, I would like to build + +[35:00] on the answer that Amit previously gave in terms of the offering of other products in Latin America. The question I have for you, Amit, is whether FinClusive is planning to expand its offering of financial services in Latin America, in addition to this pilot it is working on with Anclap. If you want to elaborate on that, that would be great. Sure, thank you very much. Yes, the short answer is yes, and in a couple of different and distinct ways. One is that we want to make sure, first and foremost, that we can provide the essential financial crimes compliance and AML toolkit, so that any
organization that's building within the context of the Stellar ecosystem has, you know, truly a plug-and-play solution for their AML and KYC and KYB needs, and that there are ecosystem efficiencies built in with some of the capabilities that we + +[36:00] provide, in particular with the digitally verifiable credentials that equally apply to individuals and small businesses. Like Ivan said, it's not just households and individuals that are trying to get access to financial services but also small businesses, which really are the engine of global job growth, certainly the case in the U.S., certainly the case in LatAm. And we can create more enablements for them in a few distinct ways. One, especially in markets that have a number of different macroeconomic, humanitarian, or other considerations to review — whether they be capital controls, high inflation rates, and the like — individuals at the household level and organizations want to know that when they hold value, that value is secure, and so to the extent that we can provide connectivity for organizations within the context of the LatAm community, like Anclap and others, + +[37:00] that can connect individuals so they can hold value in a U.S. account, that provides some stability, with respect to the insured accounts that we can provide through some of our bank partners. The second is that there are a number of organizations, including some built within the context of the Stellar ecosystem, that are also providing savings and/or other higher-yield account structures — so higher-yield USDC, for example — where organizations and individuals can now hold value in much more stable ways that provide a little bit higher yield than, say, non-interest-bearing or other accounts. The third is on the payments side, and obviously that's a core value proposition of the Stellar ecosystem, driving down the cost of payments, but really folks want to be able to transfer value securely and in near real time, and while some of the organizations — maybe legal entities, corporate enterprises — are less concerned about the multi-day + +[38:00] settlement, they just want to ensure that there is a secure way to send value. They certainly want to minimize the market and other volatility characteristics that some of the alternative coins offer, and this is where stablecoins like USDC provide a very large value. Having interoperability into local fiat systems is the fourth space, and congrats to the group and Ivan and others like him that are trying to provide the ability to connect fiat in local currency, either as a payout or as a pay-in, to a stablecoin ecosystem that now is interoperable. And then fifthly, I should say, what we are working to expand within the context of the Stellar community is that not only is there interconnectivity between LatAm and the U.S., but there's greater interconnectivity between LatAm and Africa and Europe, Africa and the U.S., and you now have organizations that are globally engaged. + +[39:00] We ourselves are a decentralized company: we have individuals based all over the U.S. but also in Europe and elsewhere, and these small businesses now have the ability, in a digitizing economy, even if you're five people, ten people, to have individual contractors, subcontractors, vendors, suppliers, and counterparts all over the world, and that requires an ecosystem that is community-bank-like but functioning in a global and peer-to-peer way. That's exciting. So as we have small
businesses that enable an individual in rural Africa or in Europe or LatAm to transact, whether as an inter-company set of financial services or transacting with their beneficiaries, vendors, suppliers, and other counterparties in their ecosystem — like gamers, like subcontractors, like freelancers, like small and medium merchants operating in these marketplaces — those marketplaces want a secure value + +[40:00] transfer ecosystem, and they want to do so compliantly. So those are the four or five ways that we are looking to expand these services beyond just compliance. Thank you very much for that, it is impressive to be honest, and I look forward to hearing more in a couple of months. Ivan, do you want to share some details on the pilot that you are working on with FinClusive — and we are in a phase where at some point we are going to work as well with the Latin American development bank — and basically, if you want to also provide some details on the roadmap in Colombia in the short and medium term, that would be great. Okay, yeah, we are working together with FinClusive on building and bringing a solution for remittances between the U.S. and Colombia, + +[41:00] but people need to understand that what we are bringing at the end of the day is infrastructure, so all other companies or developers can build solutions on top of these rails. What we are doing is just building compliant rails, and of course what we are doing today is working hand in hand with local regulators within Colombia, because it's highly necessary for them to get the best understanding of what we do and of how these blockchain rails could help not only people but also the entire financial industry, providing more financial products and services to the people. So what + +[42:00] we do is bringing services, bringing infrastructure, okay? So today we are working to bring a remittances platform for Colombians to get money from their relatives in the U.S. or send money to the U.S., but once we can prove that this infrastructure is compliant, that it's huge compared to the standard traditional remittances services, much faster and cheaper for people, we will start working on letting + +[43:00] them know that it is an infrastructure on which any other company within Colombia, or within any other country — because we are a huge network — can bring products and financial services, and that is what we are working on today. We are still not live in Colombia, because we need regulators to have a deep understanding of what we do, and of course we are working together with them, and we are working to run the project within the local regulatory sandbox so they have the best understanding of what we do, and of course, + +[44:00] as big and as robust as the project could be if we work together on this, the whole industry will get the benefits of this infrastructure, I assume. I think that probably in the next month we could launch the platform, but again, I'm a technical guy, and I insist that I would love to see other companies within Colombia or the U.S.
start thinking of financial solutions to build on top of these blockchain rails. We are working to prove that it represents a huge benefit for people, but please, those companies or + +[45:00] developers that are seeing this panel, please keep in mind that you could bring your solutions on top of this network. So that is what we are doing: still working with regulators to make pretty sure that we are okay and that we have the support of the local ecosystem, and as soon as we can move forward, you know, in a few months we will launch the remittances platform and start working on other solutions or other financial services, like local payments or worldwide payments, whatever. I hope it's not + +[46:00] me who brings those solutions but other companies that build on top of the infrastructure we are bringing, and that's the story. I'm really excited about what we are doing, and of course, having huge partners like CAF and the Stellar Development Foundation, it's huge, and I'm really optimistic that we will make real noise on Latin American economies and people's economies. Thank you very much for that, Ivan. I think that you mentioned something that is really important, which is working together with regulators, so these public-private partnerships, just to make sure that + +[47:00] not only the technology but the use cases are developed in different kinds of economies. So building on that answer, Diana, from a development perspective, from a financial inclusion perspective, how do you think that regulators and policymakers can design a landscape that would promote the responsible use of technology for bringing more inclusion? Do you have some thoughts on that? Yes, I think that, I mean, working together with the regulator and trying to be part of the regulatory sandboxes in each country, I think + +[48:00] it is very important. As Ivan mentioned, the work that he's been doing with the financial superintendency in Colombia and the regulatory sandbox is very important, and I think that these partnerships are key in order, I mean, for the companies to align with the regulation, and also for the regulators, because they learn a lot about these innovative business models and they can adjust the regulations to that financial innovation. So I think it is very important, it is a win for both sides. And I also think that incorporating this aspect that we were talking about, on financial education and consumer protection, is key, and + +[49:00] for the regulator this is very important, not only for the consumer but also from the regulation point of view, to incorporate this financial education and the impact of the usage of the product on the consumer protection and the financial health of the customer. Yeah, I totally agree with that approach and with what you have said. I think that we are running out of time, so I have the last question for all of you; maybe we can start with Amit, and then we can continue with Ivan and then with Diana. We have been speaking a lot about cross-border payments and remittances and B2B payments; what other use cases do you envision that could be + +[50:00] built, not only on blockchain but in the financial innovation space, that would promote access and usage of financial services and contribute to those who are in the informal
economy to start operating in the formal economy in Latin America? So, Amit, do you want to go first? Sure, there are a couple, and obviously, thematically, we've touched on some of them already. The cross-border remittance use case is very important as it relates to household remittances and those small-dollar payments that individuals are making; as some folks have opined, there's a lot of data out of the World Bank, IMF, and elsewhere that shows that outbound remittances from the U.S. to many Latin countries and elsewhere in the world constitute a very high percentage of even those countries' GDP, and how much those households depend on + +[51:00] actually getting funds from the United States — so that income economy, if you will, is just massive. I think it's worth re-emphasizing small business payments, because, like I said, the global engine of job growth is small businesses; the number of small, medium, and micro enterprises is responsible for 90-plus percent of global job growth, and this comes back to the changing nature of the job landscape, where there are more contractors, subcontractors, and freelancers doing work interconnected with companies all over the globe that need a much more secure and innovative financial services space. Small businesses are categorically seen as high-risk by many traditional lenders, so not only those individuals that are themselves small businesses — as a contractor or freelancer or what we call an independent worker — but also those small businesses are competing for lending + +[52:00] products and the extension of credit to just start and grow their businesses, and so that's another product that continues to be formulated within the context of fintechs as well as in blockchain-enabled ecosystems. So small-dollar lending, small business financing, is massive to enable these companies to start and to grow, and these can be enabled very easily in the same payments mechanisms that these blockchain networks provide, like with Stellar, and many of them need the appropriate due diligence to ensure that they've got the background legitimacy and, effectively, the worthiness to get those products and services. And then the third and fourth, I would say, is that while we often talk about the inclusion aspects within the context of private industry, the non-profit and humanitarian development sector cannot be overlooked here. More and more organizations are realizing that being able + +[53:00] to donate in non-fiat assets is attractive. Look at the conflict in Ukraine: within 48 to 72 hours, millions and millions in crypto assets were donated on behalf of many NGOs and individuals around the planet to support the cause in Ukraine, and this is not a one-off. There are many organizations, many countries, frontier markets, those in conflict facing crises, for whom, quite frankly, it's much easier to get a wallet and have tokens deposited and transferred to it than it would be to get any kind of fiat asset, because of the absence of banking systems — especially when you think about refugees and others, where you're talking about tens of millions of individuals worldwide that don't have access to them. And then you add the billion or so people on the planet that are just not born with an ID; well, no bank or traditional financial services company sees them as a legitimate party in the ecosystem, and this is where not only + +[54:00] the blockchain from a compliance perspective and a digital identity perspective provides huge value, but then the
ability to then tokenize value and create an ecosystem where you can send value to them directly in a way that's secure. And then the final piece, I will just say — because it's a little outside of the financial inclusion mandate but equally relevant — the financial services ecosystem and payments ecosystem is in radical need of innovation anyway, and that pays tremendous dividends to corporate enterprises. Imagine the hundreds of billions of dollars that are transacted between corporates on a daily, monthly, and yearly basis; being able to tokenize that financing and do it near real time and securely takes out a lot of the noise and rent-seeking behavior that is in the traditional financial space, with its number of financial intermediaries — something that blockchain ecosystems like Stellar can really provide. And so I think that provides a much more inclusive, secure economy, and I think + +[55:00] that's where the future will be. That's a great answer. Ivan, I know that we have only a couple of minutes, so if you want to — yeah, I think more use cases could be local payments, because there are a lot of people that probably do not transact worldwide, but they need and they deserve a way to use their money locally. So it doesn't matter if they don't want to transact worldwide; using your money locally is also a great use case for digital money. And also, as I was saying, B2B payments and B2C payments: I imagine in a few years going into a market and paying with my phone with a QR code, and the merchant, with + +[56:00] a QR code, will have the money in their wallet instantly at no cost, and I'm paying the same way. So I imagine local payments, B2B payments, and B2C payments. Yeah, totally agree with that. Diana, what do you think? Yeah, very quickly, I think, more from the financial innovation perspective, I see more consumer-centric approaches, or more consumer-centric products that take into account, in the design, the needs of the different segments of the population — for example women, with products that have this gender perspective — and also that include one thing that I think + +[57:00] is important and I didn't touch on in the first question, which is the promotion of digital literacy, because, as part of financial education programs, it is important to pay special attention to fostering the digital skills and knowledge of the population, and the most vulnerable populations, who are the ones that traditionally have been excluded from the financial system, also lack these digital capabilities, and it is important to close this gap and not open another gap, as we have on financial capabilities. Yeah, totally agree with that as well. So thank you very much for joining + +[58:00] this panel. We have heard what the benefits and opportunities of using blockchain technology and stablecoins are — security, cost reduction, and AML and CFT traceability are benefits that I noted here — but also helping or contributing to small businesses to transition from the informal economy to the formal economy. But we also need to consider financial literacy and financial education, in terms of making sure that those receiving funds in a cross-border payments scenario have all the knowledge and all the capability in order to use the financial services, you know, in a good way. So evidence tells us that you can play + +[59:00]
around with different variables or elements in order to make sure that the impact of those financial education schemes is greater. So with that we are finalizing the panel. I would like to thank you for participating today, and I'm looking forward to continuing this conversation in the near future. So thank you very much again for the time + +
diff --git a/meetings/2022-05-19.mdx b/meetings/2022-05-19.mdx new file mode 100644 index 0000000000..9e61bc36bd --- /dev/null +++ b/meetings/2022-05-19.mdx @@ -0,0 +1,178 @@ +--- +title: "Invoke Contract Authorization and Aliasing" +description: "A deep-dive protocol discussion on Soroban contract invocation, authorization models, and aliasing, focusing on CAP-50’s invoke contract transaction and its interaction with earlier smart contract CAPs." +authors: + - david-mazieres + - graydon-hoare + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: + - legacy + - CAP-15 + - CAP-46-1 + - CAP-46-2 + - CAP-48 + - CAP-49 + - CAP-50 + - SEP-30 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This discussion continues the Project Jump Cannon series, focusing on how Soroban smart contracts should be invoked, authorized, and identified within the Stellar protocol. The session introduces and debates CAP-50, which defines a new `invoke contract` transaction that separates the classic transaction source from the runtime contract invoker. + +Much of the conversation explores the implications of this shift for developer ergonomics, security, and ecosystem interoperability. Participants examine how Ethereum-style `message.sender` semantics can coexist with Stellar’s existing account model, while also surfacing concerns around aliasing, multisig compatibility, replay protection, and user-facing complexity. + +### Key Topics + +- Overview of CAP-50 and the new `invoke contract` transaction structure. +- Nested signature model: separating the transaction source account from the Soroban invoker. +- How `get_invoker` enables delegated execution, reusable signers, and contract-to-contract calls. +- Aliasing risks when the same public key may exist in both classic and smart-contract contexts. +- Interactions between CAP-50 and asset interoperability proposals in CAP-48 and CAP-49. +- Trade-offs between single-signer efficiency and classic Stellar multisig guarantees. +- Wallet and UX challenges around account migration, key reuse, and preventing lost funds. +- Replay protection and authorization semantics for contract calls that move assets. +- Ongoing debate over protocol parameters (e.g., contract code size limits) versus fixed XDR values. + +### Resources + +- [Project Jump Cannon Blog: Choosing WASM](https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm) +- [CAP-0046-01: WebAssembly Smart Contract Runtime Environment](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) +- [CAP-0046-02: Smart Contract Life Cycle](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md) +- [CAP-0048: Smart Contract Asset Interoperability](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) +- [CAP-0049: Smart Contract Asset Interoperability with Wrapper](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) +- [CAP-0050: Smart Contract Interactions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) + +
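### Sketch: Invoker-Keyed Ownership

As a rough illustration of the ownership model debated below — this is not the actual CAP-50 XDR or any real host API, and all type and field names here are invented — a token that keys balances purely off whoever the runtime reports as the invoker looks roughly like this:

```typescript
// Toy model of CAP-50-style invoker semantics. Types and field names are
// illustrative only; they do not match the proposal's XDR or host functions.
type Address = string; // an ed25519 public key or a contract ID

interface InvokeContractTx {
  sourceAccount: Address; // classic source: pays the fee, burns a sequence number
  invoker?: Address;      // optional inner signer reported by get_invoker
  contractId: string;
  functionSymbol: string; // the symbol looked up in the contract
  args: unknown[];
}

// An ERC-20-style token where the invoker implicitly controls its balance.
class ToyToken {
  private balances = new Map<Address, bigint>();

  mint(to: Address, amount: bigint): void {
    this.balances.set(to, (this.balances.get(to) ?? 0n) + amount);
  }

  // `invoker` is whatever get_invoker would return at runtime: no per-contract
  // signature checks, no per-contract replay prevention inside the contract.
  transfer(invoker: Address, to: Address, amount: bigint): void {
    const from = this.balances.get(invoker) ?? 0n;
    if (from < amount) throw new Error("insufficient balance");
    this.balances.set(invoker, from - amount);
    this.balances.set(to, (this.balances.get(to) ?? 0n) + amount);
  }
}
```

The transcript below walks through why this single-identifier model is attractive, and where it collides with classic Stellar multisig.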
+ Video Transcript + +[00:00] Welcome everyone, we're trying out something new by holding the Stellar protocol meeting here on the Stellar stage in the Stellar dev Discord. We've been experimenting a bit with platforms; the goal is to try to have these meetings in public so that people can see what's going on, can ask questions, and can understand the thought process behind changes to the Stellar protocol. But we also want to make sure that the platform is not buggy, that it works, that it's easy to reproduce the meetings, and that it's easy to attend them. So we're hoping that this works; we've tried a few different things, and if this doesn't work we'll keep trying things until we get the right fit. But we'll see what happens today, so bear with us. I will say that these meetings, the Stellar protocol meetings, are designed so that we can discuss upcoming changes to upcoming versions of the Stellar protocol in order to advance the protocol to meet ecosystem needs. So a lot of the time what we discuss are CAPs, Core Advancement Proposals, and this + +[01:00] is where sort of new features that we're going to add to Stellar, or big changes to the Stellar protocol, start, right? They are essentially technical specs that outline changes that we plan to make to the Stellar protocol, and anyone can read these CAPs; they're in a public GitHub repo, the discussion for these CAPs happens on a public mailing list, and some of it also here in the Stellar dev Discord. Again, if anyone is interested in this process, or in understanding the CAPs, or in leaving feedback about them or asking questions, we encourage you to do so. And in fact, if you're going to follow along with this meeting — it's a very technical meeting — I would suggest taking a look at the CAPs in question. At the moment we're focused on Project Jump Cannon, which will bring smart contracts to Stellar, and we've sort of modularized our approach to this: we're taking different segments of the engineering work that needs to be done to make changes to the protocol to allow for smart contracts, and we've created a series of CAPs, each of which sort of + +[02:00] addresses a specific portion of that work. And so at the moment there are actually five CAPs that relate to smart contracts: we have 46, 47, 48, 49, and now 50. Over the past week there have been some changes to [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) and 47, and [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) is a new CAP. Today we're going to continue the discussion about these things, and as part of that we certainly again encourage you to take a look at the mailing list. In these meetings we try to address synchronously some questions that are important to discuss together, but a lot of the work also happens asynchronously, so I encourage everyone to follow along with that. And so today it's a fairly open-ended meeting, and I'm not even sure the exact best place to start. I know that there may be some stuff to discuss on + +[03:00] [CAP-47](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md), I don't know if we're ready to discuss [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) yet, or
if we still have open questions about [CAP-48](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0048.md) and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md). So I guess the first question I have — I guess Leigh is not here — I know that there were a few questions about [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md), but it looks like Sid may have addressed those. Maybe we can get a quick review by John of [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) and the latest changes made there. Perfect, thank you, let's do that. Great, we can open up there, although I do think we probably should go back and talk about 47 and what's in there later; I don't think we've actually spoken about it much in any of these meetings. But let's start with some overview of what's going on in 50. So [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md), if you saw the first draft that came out — I don't know, about a week ago, maybe a little more than a week ago — was really focused on just the question of what it means to sign smart contract transactions on Stellar. The scope of it over the course of the week, as I thought about it and talked to people, really changed from just what it means to sign + +[04:00] to more of what it means to act, and what the ownership model is that we think should basically be the default: what does it mean to take an action where your action affects some assets that you probably own. And so, in terms of what's in [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) and what has changed in the most recent edition — let me actually look at it while I'm talking — it opens up with some comparisons against why the model that is used in Ethereum is really effective, the model being basically that you can take actions in general via message.sender in Ethereum. + +[05:00] There are some exceptions, you know, where you use signed messages and stuff, but ERC-20 tokens, for example, kind of a bedrock aspect of Ethereum, are based on this notion that if you're message.sender, you control the assets and you can do whatever you want. If you need to delegate that control, you use transferFrom, which works via an approval message, where first you were message.sender to commit that approval. So this is the same kind of model that I then propagate to Stellar. And so we introduce some new types of transactions; basically what I'm talking about here is an invoke contract transaction. This transaction starts off looking just like a classic Stellar transaction: source account, sequence number, fee, all the stuff you're expecting to see. The next set of parameters are basically what contract you want to interact with, what symbol you are looking up, and what parameters you are going to pass to the function that corresponds to that symbol, if it exists. And then a read/write set, which I won't talk about at all and which has nothing to do + +[06:00] with this CAP — it's really more to do with [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md), so I'm not going to talk about it. Then the next thing it introduces is, okay, well, who is running this transaction? Like,
when I ask who message.sender is — or, the term I'm using is this get invoker host function — what is it going to return when I run that contract? And so basically there's a new signature slot that identifies that person, that key, and then there's one more layer outside of that, which has your normal Stellar decorated signatures. And there's a ton of justification about why that's a good model, why certain things are done like this — why do you have this three-deep nesting of structure, why shouldn't you just do signature verification in every contract, why shouldn't you do replay prevention in every contract — all that stuff kind of ties into it. But that's kind of what's in here. + +[07:00] So John, one thing that strikes me here is that it's a fundamental shift from the current Stellar account model, in the sense that it doesn't have all the kind of high-level stuff that you get in a Stellar account; it really boils down to a single-signer account, right? Yeah, exactly, and I think there's a lot of merit to this model, mostly because one of the things you get from having a Stellar account is this ability to have multiple signers. But in reality we use ed25519, and you can do a threshold scheme for that anyway. So you can just sign with a single signature on chain that is produced by a multi-party computation off-chain and get a lot of the same results as, you know, having an account with multiple signers. + +[08:00] As for the other things that are on the account — when you're running a smart contract, the way that I'm proposing, the way we've been talking about things right now, you don't really have access to your native balance, your native trust line, directly. So there's no need to have an account to store that balance; instead you would potentially do a [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) wrapped XLM type of thing. You can use a different account sequence number — if you want to have a sequence number with the same key you can, but you don't need to. So basically this is just a strictly more flexible model that's going to be more efficient on chain, because everything boils down to a single signature. That's my stance, at least. Yeah, it's worth mentioning that even if you use threshold signatures, if you want to rotate keys you still need to modify the key, which is basically not an option here. + +[09:00] That is sort of true; there are two ways you could do threshold signatures — I'm not an expert on cryptography — but if you want to rotate the underlying key, for example if your underlying secret key was compromised, then yeah, you definitely don't have that option here. But if you just want to rotate the key shares, that's actually possible in a threshold system that supports redealing.
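To make the "everything boils down to a single signature" point concrete: however the signature was assembled off-chain (one key, or a threshold scheme among several parties), the on-chain work is a single ed25519 verification. A minimal sketch, using the tweetnacl library purely for illustration, with a made-up payload:

```typescript
import nacl from "tweetnacl";

// One keypair stands in for "the invoker"; in a threshold setup this same
// single signature could be assembled off-chain by multiple parties.
const invoker = nacl.sign.keyPair();
const payload = new TextEncoder().encode("invoke contract C... fn transfer args ...");

// Produced off-chain by the invoker (or by a multi-party signing session):
const signature = nacl.sign.detached(payload, invoker.secretKey);

// Conceptually, the only check the network needs at this layer:
const ok = nacl.sign.detached.verify(payload, signature, invoker.publicKey);
console.log(ok); // true
```

Contrast this with the classic account model, where verifying authorization means evaluating however many decorated signatures against the account's signer list and thresholds.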
So it depends what level of security you're looking for here. Interesting. And what's the relationship between the classic Stellar accounts and these new Jump Cannon accounts? None whatsoever. You might have an account that's, like, GABC, and you might want to sign for your smart contract interactions as, like, GTOMER, assuming that you actually had the + +[10:00] secret key for that vanity address. Basically, in terms of becoming the invoker, the message.sender, you don't need a Stellar account to exist for that purpose; it's totally, completely logically separate. I guess as long as you control the key you can become message.sender. I think the distinction is not that there are Jump Cannon accounts and there are Stellar accounts; I think the distinction is that there are Stellar accounts, and Jump Cannon doesn't actually have accounts — Jump Cannon just has signers, or signatures. That's a really good way of putting it, Leigh. Well, you still have a concept of an account on the Jump Cannon side, but it's more of an emergent behavior thing, right? It's like the sum of everything that this signer, you know, owns in different contracts + +[11:00] becomes the account; it's abstracted. Yeah, I mean you could describe it like that, but I think there's no physical representation. Yeah, there's what the protocol defines, and the protocol doesn't actually define an account in the [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) proposal. So the ecosystem and tooling can all refer to these things as accounts, treat them like accounts, display them that way, but the protocol doesn't actually enforce that like it does today. In principle, if everybody hated this get invoker mechanism — even though it's exactly parallel to the message.sender mechanism in Ethereum — but suppose that everybody in the Stellar ecosystem totally hated the proposal as described, you could just completely ignore it and everything would work. Because — oh, that's actually one thing I should point out: compared to the earlier + +[12:00] version that you and I have discussed, Tomer, this version actually makes the invoker signature optional, which was something that came up when I was talking to Nico. There are lots of times when you just don't need an invoker signature — the contract will never ask who the invoker is, because it's not relevant — and the invoker signature, in ed25519 where you need to reveal the key, also ends up being like 96 bytes, and why sign with some random key for no reason? So if people don't like this mechanism, ignore it. I'm still trying to wrap my head around the interaction, from the user perspective: you know, they have an account on the Stellar side, and what happens
when they move, you + +[13:00] know — let's say we're in the world of [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), not that I'm suggesting we should do that, but let's say we're there, and I move an asset to the smart side, and let's say that my classic side has some sort of baked-in multi-sig scheme. What would be the expected behavior here? That's a really good question. I think it depends on what the wallet wants to do. For me, the sane thing to do would basically be — let's say this is the first time you're interacting with smart — the wallet generates a new key for you, or it asks you if you want to use some key that you have, but probably it should just generate a new key for you. And then — there's a lot of stuff here, in the compatible-with-[CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) section — you wrap it: basically what you end up doing is + +[14:00] you end up signing with the whole multi-sig configuration on classic that you need — it'll be at the medium threshold, I should say — you run this wrap operation, it wraps it up into a smart asset in the [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) sense, and it sends it to whatever address you specify. Now that's the address that owns it on the smart side. So I'm expected to migrate to a new account ID? If you don't have multisig set up, you don't need to. If you do have multisig set up and you uniquely control the key, you don't need to either — for example, imagine that I have three hardware wallets and I need two out of three of them to sign to do anything on my account; well, it's perfectly fine for me to use the master key if I'm the only one who knows it. + +[15:00] But if I'm not the only one who knows the master key, then that's not going to work. So it really depends a lot on the configuration of your account. But the address is going to be the same, no? Do we propose a new strkey for the Jump Cannon side, for these account IDs? I don't know, that's a good question, Tomer — maybe we should. Yeah, I mean, you will need that anyway if you want to deposit into contracts. So I think the dangerous thing here is: I have an account on the classic side, it's a multi-sig, and let's say that the master signer is nullified, and now I ask someone to send over an asset on the smart side, they use + +[16:00] the public account ID, and now I don't have that. And so I'm screwed. Indeed, if that happens you are completely screwed. Sounds like a pretty big footgun. I mean, in general on smart — and this is not unique to Stellar, this is true on Ethereum with ERC-20s too — if you send money to the wrong address, bye money. It's not like Stellar, where it's just, oh, that account doesn't exist, sorry. Yeah, but if the wrong address is the right address and it's on classic, that's problematic. Yeah, that's the big problem: the fact that there are two domains, and you have these addresses that are valid in both domains but actually mean different things — that's a huge footgun. So I do think
if we're going down this route I would + +[17:00] definitely say that we need a new strkey for accounts on Jump Cannon. Well, and/or a new type of account on classic, right? People should create deterministic accounts on classic that don't have a master signer, so you can't accidentally use the master signer. But yeah, having a different strkey I guess would mostly do the same thing. Huh, that's actually not a terrible idea. It just seems kind of wasteful to me to require a classic manifestation of an account to do stuff on smart; there's actually no dependence on that thing having any physical manifestation, so requiring it to come from a deterministic account just seems super unnecessary. I'm just saying that then you can't interpret a deterministic account + +[18:00] as a — you know, because a deterministic account would have a different strkey, right, and so people could kind of move over to that. I mean, I guess the question is: is the plan to kind of completely junk classic, or is the plan that classic will form the basis of a super-low-cost, high-performance DEX and payment network, right? In the latter case it seems worth it to say that, because these smart contracts are designed to interoperate with classic, we should have, like, single sign-on, if you will, for both sides of the network. But if the plan is eventually to deprecate classic, then maybe it makes sense not to have single sign-on. I mean, single sign-on is a myth no matter what in the world of smart contracts. What if I just build a smart contract that doesn't use the same scheme? What if I wrote a smart contract that actually just does + +[19:00] the same thing as the current get invoker scheme — you submit a single signature, I validate that signature, and that's that? Yeah, of course you can implement crypto schemes that aren't even supported in classic or whatever; you can do anything, it's a smart contract. Well, the question is what are we going to make super easy, and what are the idioms that we're going to train our developers to use effectively. Yeah, John, I definitely agree with David here, because if you look at both [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), they kind of imply that we expect everything to move to the smart side. In reality, if we want them to coexist, then [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) and [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) are, I would say, simple from a protocol perspective, but they introduce downstream complexities across the + +[20:00] ecosystem of having two types of accounts and two types of balances. So the question is, does it actually make sense, if we do think that they'll coexist — what David proposes, having this other new account that you can migrate to. It's a different type of account, it's a minimal, single-signer account, but it actually has representation on both sides. That might be something that's worth considering
if we expect classic and Jump Cannon to coexist. Yeah, I actually just want to jump on that and emphasize this: two sets of keys, two sets of accounts — I think we have to evaluate this in the context of the goal of equitable access. All of this complexity for users is maybe easy for someone who is + +[21:00] immersed in the crypto, in the blockchain space, but for your average user, who is not immersed in blockchain technology, that's a pretty high bar of complexity to navigate. Yeah, and it's not just the absolute complexity but kind of the shape of the curve leading there. In an ideal world there's kind of a smooth line of the amount of expressivity you gain versus your expertise, right? And so if you start just using classic and you want to write your first smart contract, there's a relatively gentle on-ramp to get you doing that, versus: okay, now create a new account type, and whatever. And what I don't understand is that David's proposal doesn't eliminate the need for + +[22:00] having two sets of keys. You just end up with: okay, now I have a classic account, a normal one — a G account, I'll call it, in the terminology we were using an eternity ago when we were talking about deterministic accounts — and now I'm going to create another account, that's a D account, that has a different key, and I'm right back where I was. So I don't know. My interpretation of David's proposal is not to maintain these in parallel; you get an option to migrate to this new account type. This D account is a single-signer account that's valid on both sides, and from that point on the user doesn't actually interact with the G account anymore — they have this one-time migration, and presumably new accounts are this D type. But by the way, I don't actually think this is necessarily better, right? I think that maybe the concept of account authority + +[23:00] is something that makes sense to just expose in the smart contract world, and to say, well, these aren't just smart contracts, they're smart contracts intended to interoperate with the legacy Stellar accounts, and so just exposing that — the way we expose single sign-on across multiple services — is a useful thing. Again, what's the driving force for why we can't have this key signature be the decorated signatures we have today, and have the source account be message.sender — the classic account ID being message.sender — which means contracts are still only dealing with a single identifier; they're dealing + +[24:00] with account ID or contract ID as the message.sender, and then the protocol just hides the fact that most of the time that's going to be the master key, but sometimes there might be a multisig key — contracts don't actually get exposed to that. So from the Jump Cannon side it's the same experience: it's an account ID or it's a contract ID, and that's it. And I think, if I understand correctly, the thing we were going for with key signatures is that accounts don't have to exist.
And then we just say that an account ID doesn't have to actually exist on the classic side for it to be usable on the — well, I guess it does, doesn't it? Okay, sorry, no it doesn't; it's actually the same in classic, right? The account doesn't need to exist to use it. If it's a single key, you can always reference it and use it in different schemes, you know, as a signer, for + +[25:00] example — it's just a key. So, I'd intended to write about this before this meeting; there wasn't enough time today, but it's on my list of things to talk about: why it should work this way and not just be the account signers signing for it. At the end of the day my justification comes back to pre-signed messages, which is: if we don't expose what the account signers are, what does it mean to have a pre-signed message? Let me give you an example. Imagine that you're writing an ERC-20 token — which is all my examples nowadays, because it's a super fundamental token that we're all familiar with. You have these operations: balanceOf and allowance and approve and transferFrom and so on, and in this model everything still works based off of message.sender. So now what I do is I sign with my account — let's say that I have a two-out-of-three multisig account, like I was talking about before, where + +[26:00] I own three hardware wallets and I sign with two of them, that's how I do stuff — and the contract only gets message.sender, which is my source account in this model, okay. But now I want to implement EIP-2612 permit. How do I do that? How would I even define EIP-2612 permit? Also, I'm seeing people complaining about my mic volume. Okay, David, then you should just adjust my volume — for you; everyone sounds good, it's just some people are much louder than others, so I have to constantly adjust my volume, which is annoying, but if you right-click on me you can adjust my volume. Anyway, + +[27:00] anyway — oh, awesome, thank you, yeah, I got you. So how would you even do EIP-2612 permit? The way that works is based off of signing a message for the public key, and that public key was message.sender, okay. So what signatures do I use now? Do I use the account signatures? Then it's not transparent: I have to go and actually look up the account, find its signers, check them against that, and so on. Or do I just use the single signature for the public key, which is the very much more computationally efficient thing to do? But it would be wrong; it wouldn't convey the right access controls. Can we maybe — I think there are two problems that we are trying to deal with here. There's one that, I think, exists even in the + +[28:00] current version of [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) regardless, which is: when I give somebody my address, my public address, how does it look if I decide to upgrade my account? When I say upgrade my account to a smart wallet, I really want to move all my balances and stuff under my wallet. So how would that look? I don't think the current proposal allows you to deal with this type of situation. And then, for the signature scheme, why not — so, right
now we have keyed signatures. I think what we can do is add a new case that would be, like, a classic scheme or something: that is, just an account ID, and then a threshold, and then the decorated signatures, + +[29:00] and that's it. And then the invoker can be a classic account ID, which means you can actually use that as a recipient for anything, really, and then you have the threshold like you do on classic. Great, but then we're right back to the EIP-2612 permit: if I'm a contract implementing EIP-2612 permit, I have to actually go and read the account, look at all the signers, check the signatures. No, you wouldn't do that, because when you sign a payload, the statement that you have is: here's a binary blob that you want to sign as a specific invoker, and the signature is fresh. Basically you have to sign + +[30:00] a statement that says: those signatures, when you evaluate them in the context of this account, reach this threshold. You cannot guarantee that — I don't understand how the contract could avoid going and checking; I just don't see how. Well, verification-wise, is that what you're asking? Yeah — that would be, like, we need a function, of sorts, that is like verify, that supports the classic scheme. I think that's kind of all there is to it. But that has quadratic complexity. Yeah, that's okay, it's really not — it's all about gas costs, right? If people want to do that, they're not blocked from doing it, + +[31:00] but it's really expensive, whereas just checking a single signature — I'm not saying to remove the single signature, I'm saying that you have a way out of the "my public address is G-whatever" problem: if somebody sees my public address and decides to send me funds using smart, I want that to not go to dev null. And then you want the contract implementer to go and implement both verification methods? I mean, we could also have a parameter type or something, like a signed blob or an account-authenticated blob, that would be checked independently before executing the contract. Basically, maybe we don't need to verify dynamic data; all the signatures could be checked statically. We're getting into the area of being super parental and telling people + +[32:00] how to write their contracts, when as a matter of fact they already have — especially things like the type of bridging applications that need this kind of capability — they already have implementations on other networks; we don't want to be too parental. I think I'm on John's side with regards to keeping simplicity as a core value and really zooming in on these single-signer accounts, but I'm really concerned about the interoperability aspect for the ecosystem, and I do think that migrating to some sort of new account that is a single-signer, simple account — one that, obviously, you can only migrate to if you have a single-signer current account.
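The classic-scheme check sketched just above — the one whose cost is being debated — amounts to summing signer weights against a threshold. A minimal model, with field names simplified from the real account entry:

```typescript
// Simplified model of a classic account's signing configuration.
interface ClassicAccount {
  signers: Map<string, number>; // public key -> weight, master key included
  mediumThreshold: number;
}

// Returns true if the keys that actually signed carry enough total weight.
// Note the cost: one lookup (and, in reality, one signature verification)
// per provided signer, versus a single check for a lone ed25519 key.
function meetsThreshold(account: ClassicAccount, signedBy: Set<string>): boolean {
  let total = 0;
  for (const key of signedBy) {
    total += account.signers.get(key) ?? 0; // unrecognized signers add nothing
  }
  return total >= account.mediumThreshold;
}
```

This is why the single-signer model is cheaper on chain: the whole loop collapses to one signature verification, whether or not the key was assembled by an off-chain threshold scheme.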
But having that exist on both sides is something that the ecosystem can tolerate. + +[33:00] It's a little bit more complicated than that, Tomer: it's not just that you can only migrate if you have a single signer, it's that you can only migrate if the only signer is the master key, and that means lots of accounts will be locked out. Yeah. So what I see is: obviously there's a migration thing, and your wallet can tell you, hey, if you want to work on smart you need to migrate to this new account. Most accounts, by the way, are just single signers, and they do have a hundred percent of the master weight, so I don't actually think that's a problem. But your wallet can tell you, hey, this is the new type of account; if you still want to maintain your multisig, these are other ways you can do multi-sig in this brand-new world. But the thing is, that migration + +[34:00] happens once, right? I don't have to, as a user or as a wallet developer, even think about this every time I move between sides. Yeah, I think it makes it pretty difficult for wallets — either today, or those developing for standards like SEP-30, for example — to be able to interact with this world. We're still sending people down this track of "you need to have two sets of keys," because the migration path is not particularly clear, I think. Otherwise we're saying you have to migrate all the way to this smart-only Jump Cannon construct. Like, how would you have multisig on Jump Cannon and multisig on classic at the same time, with the same account? Is that doable? + +[35:00] I'm proposing not to ever do that, actually — having just a single migration, and then you're in this other world, which is both classic and Jump Cannon, but on a simplified account model. Now, if you have a multi-sig account right now on Stellar, then we can think about ways to either do threshold signatures or just actually deploy a multisig contract, the same way that multisig contracts work on Ethereum and other ecosystems. What you're describing is every account that exists on the network today that uses multisig deploying their own contract on Jump Cannon. That is one option; again, that's only if you want to interact with Jump Cannon, right? So if you're a Vibrant user and you're not using any smart contracts, + +[36:00] these are not things that we need to have figured out at launch. And yeah, it does not have to be one contract per wallet, right? Obviously the Parity wallet is not a great example, but the Parity wallet was a single contract that provided multi-sig for a lot of accounts. While it's super secure — in fact nobody's ever been able to get their money out of it. Yes, it is super secure. I mean, I still don't understand the whole account migration thing. How does that — I mean, if you look at some of the interesting accounts that are multisig, right, issuers: are we saying that changing the issuer public key — no, I'm not saying that at all, I'm saying you only need + +[37:00] to migrate to a new type of account if you want to interact with Jump Cannon, and [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) proposes that exactly.
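For reference on the "different strkey" idea raised earlier as a fix for the two-domains footgun: the change is cheap at the format level, since a strkey is a version byte plus a payload plus a CRC16-XModem checksum, base32-encoded, and the version byte alone determines the leading letter. A sketch of that encoding — the contract version byte shown here is the value contract addresses eventually adopted, which was still an open question at the time of this meeting:

```typescript
// Strkey-style encoding: version byte + 32-byte payload + CRC16-XModem
// checksum (little-endian), base32-encoded. A different version byte yields
// a different leading letter, so smart-side addresses can't be mistaken for
// classic G... accounts.
const B32 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567";

function crc16xmodem(data: Uint8Array): number {
  let crc = 0;
  for (const byte of data) {
    crc ^= byte << 8;
    for (let i = 0; i < 8; i++) {
      crc = crc & 0x8000 ? ((crc << 1) ^ 0x1021) & 0xffff : (crc << 1) & 0xffff;
    }
  }
  return crc;
}

function base32(data: Uint8Array): string {
  let bits = 0, value = 0, out = "";
  for (const byte of data) {
    value = (value << 8) | byte;
    bits += 8;
    while (bits >= 5) {
      out += B32[(value >>> (bits - 5)) & 31];
      bits -= 5;
    }
  }
  return out; // 35 input bytes -> exactly 56 chars, no padding needed
}

function encodeStrkey(versionByte: number, payload: Uint8Array): string {
  const body = new Uint8Array([versionByte, ...payload]);
  const crc = crc16xmodem(body);
  return base32(new Uint8Array([...body, crc & 0xff, crc >> 8]));
}

// Same 32-byte key, different version byte, visibly different address:
const key = new Uint8Array(32);
console.log(encodeStrkey(6 << 3, key)); // G... (classic ed25519 account)
console.log(encodeStrkey(2 << 3, key)); // C... (contract-style address)
```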
But like, to a much greater extent, right? You need to, every time you move between the — like, I'm talking about the issuer: why would the issuer need to migrate? Well, if they want to have any presence in Jump Cannon, they will have to use a different account, I guess. I think there will need to be a distribution account, potentially — if they want to interact with smart contracts, they need to be on smart. But I think that depends a lot on the asset interoperability story, which we still need to think about. + +[38:00] Can you just tell me what this migration looks like? I want to, you know, migrate from my classic account to a smart account; what has to happen? Do I actually have to somehow move balances, or does it just transmute balances and annihilate the classic key? The very simple happy path for something like this is that you log in, your wallet tells you, hey, we have a lot of new functionality on smart contracts, but for that to work you need to migrate to a new account, let's call it a D account; it takes you through the process, and once you finish it, it effectively merges your account — and I'm saying happy path, meaning there's just a single signer and it's the master signer. And then now you have a D account and you can either operate on regular Stellar classic, + +[39:00] you know, make classic payments and such, or you can operate on smart contracts. But it's a simple account; you can never actually do on-chain multi-sig the way you did with your previous account. I don't know if we need to explore this actual migration flow in this meeting; it sounds like it will have a lot of edge cases. Like, I'm thinking from a KYC point of view, right: you have a bunch of trust lines, so you probably need those D accounts to have the exact same public key, because that's what got KYC'd — or you deauthorize trust lines, you know, that's another option — things like that. Yeah, again, the happy path consists of accounts that are very simple: they + +[40:00] have a single master signer, and assets that, for the most part — the overwhelming majority — are not auth-required and can easily be moved. Cool, yeah, that's all I was hoping for: a very quick explanation to help clarify in my mind what it was that we were talking about, thanks. I mean, I need to think about it more. What I really would need to do is go and look at what the current configuration of accounts network-wide is — you know, there are like 6.7 million accounts or something like + +[41:00] that. Can most people actually just migrate to a new public key, meaning that their master weight is one and all the thresholds are equal to one? If that's the case — or, I said migrate to a new public key, I mean migrate to only their public key — if that is what most accounts are like, then maybe this is reasonable. If most accounts aren't like that, or even if many accounts aren't like that, then I don't think it actually gets us anything; there's no point poking holes in it, because it's just not general enough to be worth considering. So I need to go and, you know, hit up Hubble and figure out what the situation is. Wait, hold on, quick: I think in the alternative, people still can't use smart, except now they could lose money
Wait, hold on, quick: I think with the alternative, people still can't use smart, except now they could lose money. If people accidentally send money on smart... I'd rather have a world where most people can't use smart than one where it looks like they can, and so money gets
+
+[42:00] destroyed. Yeah. I also just want to point out: we can research this with Hubble, but there may be a lot of accounts in Hubble that are not actually meaningful. What we should also evaluate is the products that are getting developed on Stellar, either recently or in the future, that we know of. Are they using things like SEP-30? What's the story going to be for products that are just setting out now with multisig? Because if we just have a look at Hubble, we may find a large number of accounts that only use a master key, but those may also be inactive accounts, accounts from the past, accounts that are not actually relevant to where we're going from here. Yeah, but Leigh, we're not annihilating the idea of multisig, right? We're just saying
+
+[43:00] that we need to find a different multisig construction for these accounts moving to this new account type. Well, it sounds like, on some level at least, doing some research is a good idea, to understand how people are using multisig currently, right? Any other thoughts on this issue? I mean, we have about 14 minutes left, and we can stay here or we can move on. Now, I'd like to go back to the first thing, because maybe that would be a different approach to this problem. That is the question I was asking earlier: how do we think about... I don't know how this is solved in Ethereum. Like, if you have this
+
+[44:00] flexible invoker thing: if I decide to take my account and move my assets into a smart wallet, my public address changes, in a way, I think, right? So how does the money get routed if somebody sends funds to my old key? If you have the address and only you have the key, you just go grab it. No, what I mean is... well, it's kind of funny, right? Does it mean that even though I decided to upgrade my setup to a smart wallet, I still have to keep this kind of single-key setup around forever? If you upgrade to a smart wallet, you also have a smart wallet; welcome. There's no way to stop using that
+
+[45:00] address, other than to tell everybody to stop sending money there. I mean, it's no different from when you move houses, right? People will keep sending mail to your old address, much to the USPS's dismay, unless you tell them not to. So you can do forwarding, right, in that case? That's kind of what I'm getting at. There's no way we're going to do that, probably. No, but if we had to support those types of scenarios: some way to do forwarding. You could do forwarding for those multisig accounts. Why do forwarding, though? Just keep the key around until you don't want to forward anymore, and go and collect the money. We don't need to be a dad for these people: if they want to switch to a smart wallet, they should know what to do. No, I'm just trying to compare the complexity of migrating to those D accounts, you know, whatever accounts, with
+
+[46:00] having a way to make it easy for people to change their keys, basically. And even in smart, if you make it easy for people to change their keys, you're just going to end up right back at:
"okay, well, now I have to go and read this other thing and check the signatures based off of that," and everything becomes less efficient. Do we care about performance or not? If we don't care about performance, let's just glue on the EVM and get 13 TPS; I don't understand why we're working so hard. If we care about performance, we can't do that and we have to do better, but that means we should be making decisions that matter. I mean, it's not just about performance, right? The whole reason we are talking about this is usability, right? Making it so that people don't have those foot guns everywhere. But I think we care not so much about performance in absolute terms as about the scalability of performance in terms of cores. So if we
+
+[47:00] can split it up into work that can be parallelized and work that can't be, then as long as there's a way to do the signature checking such that it can be parallelized, I don't care. I'd rather have it be four times as expensive but embarrassingly parallel than four times faster but competing for the same CPU core as all the other logic and all the transactions. Sure, but I'd also rather it just be embarrassingly parallel and not four times slower. And what I'm saying is: the discussion we're having right now doesn't change the parallelization. Well, it probably makes it a little worse, because there'll be more contention, but it definitely doesn't make it better. Nico's proposal will not make it better, but it does mean there's more work for everybody to do, so it's definitely going to be slower. Should we just add BLS signatures? The other thing is: it seems like not all implementations of threshold Schnorr... like, people cut some corners, do things that
+
+[48:00] we don't know how to attack, but that prevent the proofs from going through. So there's the fact that the multisig we have now may be slow, but it is very simple and very easy to understand the security of. If you want this level of security, you can still use it in a smart contract; you just pay for it. And if you trust the threshold schemes, or if you're happy having a single key, you use a single key. Same thing I said before: I don't need to tell people what to do in the land of smart contracts. If they don't like the thing that I told them to do, you know what they're going to do? They're just going to do it slower, differently, right? Anyway, you can make it easy for them to do it right. You can make it very easy for someone, in a smart contract, to check that something's been authorized by a particular classic account, and if
+
+[49:00] that is something that's easy to do, then people will use it where it's appropriate, and there'll be more coherence across the two worlds. Do we want to talk a bit about contract lifecycle today? We have like eight minutes. I mean, there are some open questions that came up on the dev mailing list that I can talk about, if we think we have enough time. I think we should start on it, and if we run out of time we can bring it back up at the next meeting. Okay.
+
+[50:00] Okay. So one of the questions we're still discussing is whether the contract code vector size should be fixed in the XDR or be configurable by the validators. The advantage of having it configured by the validators is
that, if for some reason we think there's an issue, an overlay issue for example, the validators can fix it through the validators themselves instead of updating the XDR, which would, I believe, require a protocol upgrade. Does anyone have any thoughts on this? I think John is advocating for it to be fixed in the XDR, and Nico was advocating for it to be configurable. Well, you need a parameter upgrade either way, right? Or are you suggesting that different validators have different thresholds? No. So, yeah, you would need either a parameter upgrade plus an upgrade to the validators,
+
+[51:00] or a protocol upgrade, right? Either way it goes through consensus; in one case you don't need to recompile. That seems reasonable, to not have to recompile, I guess. Or maybe we don't pick, like, four gigabytes, but we pick something in the XDR that's much larger than we think people should need, and then we enforce a lower limit in validators; that's what I was thinking. Yeah. My big concern about this is that it's not safe to ever make the number lower; you can only ever make it bigger. And I'm just like, what is the point of this work? Why shouldn't we just choose a number that makes sense? And basically, if people want to write bigger contracts, just split your logic up between two contracts. I mean, it's not a big deal. Do we really want people uploading, like, a gigabyte? A hundred megabytes?
+
+[52:00] What's the number that we're happy with people uploading in a single operation? Choose that number and call it a day. Likewise, what's the number that we're happy loading up into the VM as a single operation? Choose that and call it a day. Great. I can see you're talking, but I can't hear you at all; I don't know if anybody else can. Hey, can you hear me now? Yes. Cool. Yeah, I just wanted to point out we have the same issue basically with... there are going to be quite a lot of fixed numeric parameters in the system. It's really not just one: all the cost factors, all the memory limits, the parallelization factor when we get to parallel execution. There's going to be a big bucket of numbers that are just constants that the system runs on, and there's going to be (this is a CAP coming at some point; we haven't got to it yet) a parameters block.
+
+[53:00] We're not going to have a single parameter in the ledger header for every single one of these, because the ledger header would get gigantic: a new magic ledger entry called "smart contract numeric parameters," and people are going to have to vote on that on a regular basis. So I would just say it is a number, but we would have to agree it's a number that can only go up, then never go down. No, I don't agree. Or it can go down, but we have to have a policy that, like, we'll give you two months' notice before we reduce it, or something. But no, I don't agree with that; it cannot go down. You can break somebody's smart contract by making it go down, even if you say "I'm still willing to run a smart contract that's longer than this if it was uploaded before." What if that smart contract itself is a factory which deploys new smart contracts? No, there's no question that there are going to be parameter changes that will break existing smart contracts; that's guaranteed,
+
+[54:00] because if we change gas limits at all, in any direction, some smart contracts will begin passing that didn't, and that implicitly means some will fail that didn't,
because they were expecting the other one to pass. There are ways in which you break people's code by changing parameters, guaranteed. There's no way to just make it one-sided; that's not going to work. I'm not clear on why changing the gas factors would break people's code; you could always just run it with more gas, right? But something that you expect to work on one set of parameters will work differently on another set of parameters. Sure, that's fine, as long as I can figure out how to still run it. But if you lower the size limit, you can fundamentally break, never to be recoverable, a factory contract. It's totally recoverable: you just have to convince everyone to raise the number again. So, right, so it's only possible to go up, as a consequence, without breaking people's... okay, no, this...
+
+[55:00] No, I completely disagree with your premise. These numbers affect whether... I mean, seriously: suppose your contract relies on calling two contracts in order, and it's really important that the first one fails, for some reason, because you didn't notice that it fails, but it's part of the semantics of your system working that the first call fails. We increase the limit, and now it succeeds: your system stops working. People can rely on things in the wrong order, and there's no fix for that if you want the numbers to only ever go up. Yeah, sometimes you have to redeploy things if you want to fix them, because there's no other way. You've set a specific set of parameters that you've defined as the ones my thing works in, and if the parameters change, it is possible everything will... I mean, I think, when it comes to all those, yeah, weird parameters, especially when it comes to any kind of limit, they will always have...
+
+[56:00] when it comes to coming down from those limits: if you're close to that limit, yeah, I would say you may get broken by some weird tweaks like that. If you want to be safe, just play much further from the limit. Well, I think it's totally reasonable to expect that something like code size is not going to... there's no particular reason why we're going to start reducing that over time. It's probably the case that it would stay the same or go up, and I think that's a reasonable assumption. But it's no different than saying that your set of pre-signed transactions can only have a certain fee that they work under, and they won't work if the base fees increase, or something like that. There's... well, we were actually concerned about that problem, Graydon. Yeah, and we
+
+[57:00] were so concerned about it that nobody wanted to raise the base fee, and we actually did [CAP-15](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md) fee-bump transactions to fix that problem. So that's actually an example that proves the point that this is a problem, and it will continue to be a problem. There's absolutely no way you can fix this by saying it can only move in one direction; that doesn't fix the problem. So, as far as I can tell, you're demanding that we fix a problem that can't be fixed. I think we need to just set expectations appropriately, right? If we tell people "we promise never to break your smart contracts," it's going to be a problem, right,
because we will end up breaking them, and we'll also end up not innovating in ways that we should have innovated. And so, you know, validators may vote that they're not interested in seeing a particular parameter upgrade; that's fair, right? It's not us making parameter changes; it's
+
+[58:00] us collectively, the entire network, right? It's not just SDF making these changes. But if a change happens and all the validators vote for it... the validators could vote to migrate to a new protocol version that turns off smart contracts, you know. There are changes that can be made among the validators that will break existing functionality. I guess the core of my argument here is: there are things that are worth not promising to break, and there are things where it's completely reasonable to make a promise not to break people's stuff, and I think code size is definitely an example where it is completely reasonable to promise not to break people's stuff. You said it yourself: you can't foresee a reason why we would want to make the code size smaller. Not a really simple one, but, I mean, if you give me a couple of minutes I could probably come up with something. I just don't think it's likely. But there's a difference between likely and what I
+
+[59:00] think we should be promising. I don't see why that promise is more important than any other promise. There are so many other promises we could be making that are also not reasonable; this is just another one. This really feels like we're bike-shedding at this point. Let's get a thing built, and then how hard or easy we make it to tweak one number, that's something we can figure out; it's just not an important thing at this point. I think David gets the final word in this meeting, because we're out of time. But we'll reconvene next week, and obviously there's still activity happening here on the Stellar Dev Discord and on the mailing list. So anyone who's watching, feel free to jump in there, or to see where the discussion is trending. Anyone in this meeting will also talk on those lists, and we'll get back together here next week. But we're out of time for now.
+
+[01:00:00] you
+
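+*Editor's sketch:* the "parameters block" floated in the discussion above (one network-voted ledger entry holding the numeric limits, instead of one ledger-header field per parameter) might look roughly like this. No CAP defining it existed at the time of the meeting; every field name below is invented for illustration.
+
+```rust
+/// Hypothetical "smart contract numeric parameters" ledger entry.
+/// Validators would vote on new values through a parameter upgrade.
+struct SmartContractParameters {
+    max_contract_code_size: u32, // upload limit debated above, in bytes
+    max_vm_memory: u32,          // per-invocation memory cap, in bytes
+    parallelism_factor: u32,     // parallel-execution knob mentioned above
+    cost_factors: Vec<u64>,      // per-operation cost ("gas") multipliers
+}
+```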
diff --git a/meetings/2022-05-26.mdx b/meetings/2022-05-26.mdx new file mode 100644 index 0000000000..4370a5f025 --- /dev/null +++ b/meetings/2022-05-26.mdx @@ -0,0 +1,203 @@
+---
+title: "Smart Contract Storage Interaction Lifecycle"
+description: "A deep technical discussion on Soroban’s smart contract storage model, interaction lifecycle, and concurrency guarantees, covering how CAP-53, CAP-52, and CAP-46-2 work together to enable deterministic execution, parallelism, and safe contract evolution."
+authors:
+  - graydon-hoare
+  - jonathan-jove
+  - leigh-mcculloch
+  - nicolas-barry
+  - siddharth-suresh
+  - tomer-weller
+tags:
+  - legacy
+  - CAP-46-1
+  - CAP-46-2
+  - CAP-46-3
+  - CAP-46-5
+  - CAP-50
+  - CAP-52
+---
+
+import YouTube from "@site/src/components/YouTube";
+
+
+
+This discussion continues Project Jump Cannon by focusing on how Soroban smart contracts store data, declare access patterns, and safely interact with the Stellar ledger. The session explores why storage design, determinism, and explicit access declarations are foundational for enabling parallel execution without sacrificing correctness.
+
+The conversation walks through three closely related CAPs—CAP-53 (now CAP-46-5), CAP-52 (Smart Contract Interactions Minimal), and updates to CAP-47 (now CAP-46-2). Together, these proposals define how contracts persist state, how Core pre-validates read/write behavior, and how contracts are created, upgraded, or removed while keeping ledger metadata manageable.
+
+### Key Topics
+
+- CAP-46-5 Smart Contract Data model:
+  - Typed, persistent ledger entries derived from CAP-46’s value system (now CAP-46-1).
+  - Explicit read/write footprints that declare all ledger keys a transaction may access.
+  - Deterministic, serializable execution that enables parallelism without dynamic locking.
+- Footprints and concurrency control:
+  - Why transactions must pre-declare accessed keys.
+  - Trade-offs between fine-grained and coarse-grained storage for performance and contention.
+  - Handling dynamic access patterns via offline “recorded” footprint generation.
+- Point-access-only storage APIs (see the sketch after this list):
+  - Rationale for excluding range queries to preserve static footprints and parallel execution.
+  - How developers can model higher-level structures (maps, tuples) within stored values.
+- CAP-52 minimal contract interactions:
+  - New invoke-contract transaction shape and host functions.
+  - Contract-defined authorization and replay-prevention strategies.
+  - Design trade-offs around developer flexibility versus foot-gun risk.
+- Authorization and replay prevention debates:
+  - Risks of rolling custom auth logic inside contracts.
+  - Potential for ecosystem-standard verifier contracts or shared libraries.
+  - Benefits for relayer-based transaction models.
+- CAP-46-2 Contract Lifecycle:
+  - Contract ID derivation via hashing and user-provided salt.
+  - Discussion of immutability vs future mutability.
+  - Removing contract code and keeping ledger metadata tidy.
+- Open questions around versioning and upgrades:
+  - How contracts should depend on mutable vs immutable dependencies.
+  - Challenges of data migration when contracts cannot access each other’s storage.
+  - Tension between protocol-level guarantees and ecosystem-driven patterns.
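+*Editor's sketch:* the point-access-only storage surface summarized above amounts to four key-value host functions and nothing else. The declaration below is illustrative, not the CAP's actual interface; in the CAP-46-1 value model, keys and values would be opaque 64-bit host-value handles.
+
+```rust
+/// Hypothetical host-value handle (CAP-46-1 style).
+type Val = u64;
+
+extern "C" {
+    fn contract_data_put(key: Val, val: Val);
+    fn contract_data_get(key: Val) -> Val;
+    fn contract_data_has(key: Val) -> Val; // boolean host value
+    fn contract_data_del(key: Val);
+    // Deliberately absent: range queries and iterators, which would make
+    // static footprints impossible to declare up front.
+}
+```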
+
+### Resources
+
+- [Project Jump Cannon: Choosing WASM](https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm)
+- [CAP-0046-01: WebAssembly Smart Contract Runtime Environment](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md)
+- [CAP-0046-02: Smart Contract Life Cycle](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md)
+- [CAP-0046-05: Smart Contract Data](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md)
+- [CAP-0050: Smart Contract Interactions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md)
+- [CAP-0052: Smart Contract Interactions (Minimal)](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0052.md)
+
+ Video Transcript
+
+[00:00] Okay. So I think we're going to get started, and hopefully David will soon join. Hey everyone, welcome to another protocol meeting. I'm filling in for Justin today. So, in these meetings we discuss potential protocol changes. These changes are outlined in documents called CAPs, or Core Advancement Proposals, and the big change we're working on right now is Project Jump Cannon, which is a feature to introduce native smart contracts on Stellar. We've divided this massive change into a set of composable CAPs, and the agenda for today is: we're going to talk about [CAP-53](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md), Smart Contract Data, which was recently published by Graydon; we're going to talk about [CAP-52](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0052.md), Smart Contract Interactions Minimal, which was recently introduced by John; and we're going to talk about the smart
+
+[01:00] contract life cycle, [CAP-47](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md), which was recently updated by Siddharth and requires some further discussion. So let's do this. Graydon, can you kick this off with a review of [CAP-53](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md)? Yeah, just give me one second... all right, got it. Yeah, sorry, I had the pull request open, and I actually wanted to switch over to the merged version of it. So this is a fairly straightforward CAP. It's not really introducing anything that will probably be a surprise to anyone here; it's just formalizing something that was left out, or left for future CAPs, in the modularization that we've been doing, splitting the conversation off into different pieces so that we can work on them separately and land them separately. But it's fairly tightly related to the
+
+[02:00] data model that was presented in [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md). So a lot of the motives in [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) around the data model implicitly talked about how that data would be stored permanently. There are concerns that bear on the data model that interact with it while it's in memory, and then there are concerns more related to its long-lived accessibility over multiple invocations of a smart contract, while it's stored on the blockchain. So some of the requirements that are rephrased and brought into the foreground here have to do with interoperability, where we want there to be something a little bit more general, or I'd say a less general but more interoperable, more generally understood, more widely understood structure to the data than just a byte buffer. A lot of
+
+[03:00] smart contract platforms essentially only provide a byte-buffer storage service to smart contracts, which means that nobody except that exact version of the smart contract code can necessarily read any of the data that's stored there, and that produces interoperability problems if other third parties want to access it offline, you know, browsers that want to take a look at the data. It also creates versioning problems, because it means that you're essentially locked to the schema language or serialization format that the contract used. It means that if a contract wants to pass data from one contract to another, it has to transform it.
So there's a whole interoperability angle here, which we wanted to address in the [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) data model, and we're carrying that forward here. I think a lot of the concerns only make sense when considered in terms of persistent data, and this is the persistent data CAP. So here we're just talking about basically what the ledger entry that's going to store things looks like, and
+
+[04:00] a handful of host functions for accessing it. They're very basic functions, just key-value store access functions: they're not range functions, they don't include iterators or range queries or anything like that. They're just get, put, "do we have a key," and delete the key. Very simple host functions. The interesting thing, really, is the choice of granularity, which is left to the user. This is a little bit different, in that many smart contract systems you'll see provide a key-value store keyed by a byte string, or some kind of prefix that goes into a Merkle tree or something like that. We essentially don't expose any interior nodes of the merklized storage that we use anyway; we just provide a single bucket-list
+
+[05:00] hash. The fact that our data structure has internal organization doesn't really apply here anyway, and structurally it wouldn't make sense. But what we're doing here is also allowing structured values as keys, rather than byte strings. So of course you can serialize any value, and you will, in the case of using it as a key here, but the API is encouraging users to have fairly structured values, so they can have fairly rich keys. And implicit in all of this is that there's a parallel access and consistency model discussed in this CAP, which is that we're trying to encourage the possibility of executing smart contracts in parallel, and if you have parallel access to a data store, you have to talk about what the consistency model is: what does it mean when two different users access that store in parallel? So we're specifying here
+
+[06:00] that it's a serializability consistency model, which is the strongest possible. It says the observable side-effect model has to be equivalent to the transactions executing in the exact order that the transaction set specified. And parallel consistency models imply the existence of some kind of concurrency-control mechanism: how you actually enforce that. In this CAP we're talking about a very strong mechanism for concurrency control; it's what's typically called deterministic scheduling, or non-conflicting concurrency control. The idea is that every transaction that enters the system will pre-declare a footprint. So there's this thing called the footprint, which is the set of keys that a transaction is going to touch, whether it's going to read them or write them; it actually marks whether it reads or writes each key in its footprint. And the footprint is
+
+[07:00] static information that accompanies a transaction. This CAP doesn't describe exactly how a footprint is encoded or accompanies a transaction invocation, because we don't even have a CAP open right now that has transaction invocation, or at least we haven't settled on one; we have several CAPs open right now.
But when transaction invocation occurs, it's going to need to provide a footprint; this CAP is asking that footprints be available. And the footprint defines the keys that the transaction is allowed to perform these data-access operations on. So if you try to perform a get against something that's not in your footprint, the get will fail, even if the value is there: if it's not in the transaction's footprint, it's defined as failing. Similarly for a put, or even a "has" point query, anything like that: you have to have it in the footprint. So for simple transactions this is fairly straightforward. You can tell what they're going to read or write, and so you just put the things in the footprint
+
+[08:00] that they're going to read or write. That's fine. For complicated transactions that have highly dynamic behavior, maybe it's not even clear what they're going to read or write, because it's determined by an earlier read in the transaction. These are what are called transactions with dynamic footprints. The recommendation in this CAP, and what we're prototyping, is a fairly standard technique from the literature, often called reconnaissance queries; I'm using the term "footprint recording" here. You just run the transaction offline, before you submit it, on a read snapshot, and that gives you a fairly good guess, an approximate footprint, that you can then staple to the real transaction when you submit it, and it will succeed if that footprint still matches. So it essentially pushes concurrency control
+
+[09:00] out of the transaction processing loop and into the user's lap. And so the user is now racing on divergence between the read snapshot they used to construct their footprint and the footprint they actually submit a transaction with. Theoretically, if there's a very highly contended key and a very divergent query, they may have to retry multiple times, because if there's any significant divergence between the recorded footprint and the footprint they submit, their transaction could fail. But the database itself doesn't have to actually perform the concurrency control. So in some ways this is shedding load from a concurrency-control mechanism inside the database out to the users, and that has turned out to work very well for maintaining even, high throughput on existing databases that adopt this technique. So we're trying to adopt that technique as well. So those are the two main topics in here. The fact that
+
+[10:00] the user has control over granularity... I should go back and talk a little bit more about granularity for a second. The granularity control that exists here has a natural tension in it. Doing a point read on a key-value store necessarily has some overhead: it has data-framing overhead, it has serialization overhead, it involves going to the I/O system at all, it involves touching the disk, doing a seek, doing a read. All of that overhead is potentially quite high, so it can be worth trying to amortize that overhead and read more than one item. If you are going to access more than one item, you don't necessarily want to pay that on a bit-by-bit or byte-by-byte basis; you want to bring in a bunch of bytes at a time when you do an I/O. And so that amortization tension pushes you towards larger I/Os and a coarser granularity of storage.
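+*Editor's sketch:* the scheduling rule implied by footprints above can be made concrete. The types below are invented for illustration (the CAP deliberately leaves the encoding to a later transaction-invocation CAP): two transactions may run in parallel only if their footprints don't conflict.
+
+```rust
+#[derive(PartialEq)]
+enum AccessMode {
+    ReadOnly,
+    ReadWrite,
+}
+
+struct FootprintEntry {
+    contract_id: [u8; 32],
+    key: Vec<u8>, // serialized structured key (an ScVal, in CAP-46-1 terms)
+    mode: AccessMode,
+}
+
+/// Two footprints conflict if they touch the same key and at least one
+/// side writes it; conflicting transactions get scheduled into the same
+/// serial lane.
+fn conflicts(a: &[FootprintEntry], b: &[FootprintEntry]) -> bool {
+    a.iter().any(|ea| {
+        b.iter().any(|eb| {
+            ea.contract_id == eb.contract_id
+                && ea.key == eb.key
+                && (ea.mode == AccessMode::ReadWrite || eb.mode == AccessMode::ReadWrite)
+        })
+    })
+}
+```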
But then the flip side of that, of course, is that if you read or write data that you don't actually need, it's
+
+[11:00] wasted. If you actually only wanted to change one byte in the middle of a large data structure, that's waste, and you're paying for that waste in terms of fees or CPU time or I/O or whatever. So that pushes you in the opposite direction, towards fine-grained data. And that problem actually magnifies itself when you start talking about parallelism, because your footprint is a unit of contention, and so if two transactions contend on the same data value, they can't execute in parallel. Basically, that's what the footprint is doing: it's giving a static scheduler the opportunity to partition execution into separate lanes, and then those lanes will run with no coordination. But those lanes are necessarily serial themselves; you only get parallelism between them. And so if you have a whole lot of transactions, for example, that all touch some common data value in their footprint, they will all be scheduled to run in serial,
+
+[12:00] and so, to exploit parallelism, it is in the favor of the user to have finer-grained footprints. So you have these two different directions of pressure; it's a natural trade-off between fine-grained and coarse-grained data access. And so we don't specify what the granularity is here; we try to be very open about that, and that's why the key type is literally just an arbitrary value. I think that's all I really had to say about it. There's not a lot in this CAP; it's actually quite small, and it kind of just does exactly what you'd expect of a key-value store. Awesome. Can you just quickly talk about the rationale for why there is point access only? Yeah. So range queries essentially aren't
+
+[13:00] compatible with static footprints, because we don't know how far they go; that's the simple version. We could do it theoretically, but we would lose parallelism. So, yeah. Awesome. So we are kind of actively trying to deter contract developers from creating a need for range queries; so, for example, something like a classic order book is probably not a great fit for this, which is okay with us. Yeah. And, I mean, it's a good point that, in a broader sense, a static footprint actually bounds the I/O you're going to do. I mentioned this in the CAP: it allows us to essentially have no surprises. The contract is not going to be interrupted in the middle of the contract in order to actually go touch the disk dynamically; everything that it's going
+
+[14:00] to read, it says upfront, and therefore we can just do a bulk read at the beginning of the contract. In fact, we'll integrate into a single pass through the storage system all of the reads from all of the contracts in a given parallel execution lane; they'll just read all their data at once at the beginning of a transaction-set execution and then write it back at the end. So that kind of thing is naturally incompatible with something like dynamic range queries. But, that said, because you can store arbitrary values, if you want to store a map, that's completely reasonable. One of the values you store doesn't have to be just a small string or a number; you can store a map that has a bunch of stuff in it, and then do a range query on that map.
It's just that when you do an I/O, you're going to get the entire map; it's going to come off the disk. So you have to navigate that trade-off yourself: maybe shard your map into a bunch of different sub-maps or something like that, if you're interested in not loading and
+
+[15:00] storing the whole thing every time. When I tried to use a very early version of this design, like a month ago or something, one of the kind of annoyances that I quickly encountered was that when I wanted to partition my namespace, I basically said: okay, well, I need some kind of key that is a tuple. And so I just used the ScVal vec option: I pumped a vec full of stuff and then used it as my key, and that way I got a partitioned namespace. The thing was, doing that seemed kind of inefficient, because it's like: okay, I need a three-tuple as my key, so I go and create a vector (host function), I push into it (host function), I push into it again (host function),
+
+[16:00] and then I call the get function (host function again), and it just seems like a ton of work to get a single piece of data. So do you have any thoughts about that offhand? I guess I'm not sure that it is a ton of work, would be my first reaction, in the sense that... I don't know. So, for example, we could make a contract-put-one, contract-put-two, contract-put-three that takes three values as inputs, three keys, four keys, five keys; we could reflect those usage patterns in function signatures as conveniences. But I'm not sure they would do any less work, and I don't think the calls in and out of the VM are actually all that expensive; I think you're only talking about one extra opcode and a couple of... like, a push and a call. So, from a user perspective, I think you
+
+[17:00] have a good point, and I think, if the SDK can't make that pattern fairly convenient, in terms of putting a superficial porcelain on top of it that makes it look nice, then perhaps we should expand the functional repertoire to provide additional support for that. I think one thing you might be able to do, just responding to your comment about the SDK, is make it easy to use things like tuples. We're trying to make it easy to use things like vecs and maps already; tuples might just be one more thing we could add. Yeah, that's kind of what I'm expecting: that you can do the kind of thing that... I hate to use this as precedent, but the Rust standard library does something similar here, where it just says, you know, people use tuples up to about five
+
+[18:00] or seven or twelve or whatever, so just macro-generate enough support for all the basic tuple types that anyone's likely to use, and have them as conveniences. And then you only wonder how to use this arbitrary-vector sort of approach if you're doing something weird. Yeah, I mean, as long as the cost of doing all the push functions isn't particularly high, then it doesn't really matter to me, because you could always add SDK support to do this. Indeed, I just built my own thing that I could pass the functions to, and the parameters too, and it would throw the vec back out at me, so it would look a lot less disgusting. We could generalize that, of course, as long as the cost isn't high at the protocol level. So I think the cost of a function call is fairly small.
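+*Editor's sketch:* the tuple-key pattern complained about above, spelled out against the hypothetical host surface from the earlier sketch. An SDK would wrap this in a one-liner; the names are illustrative only.
+
+```rust
+type Val = u64; // hypothetical host-value handle, as before
+
+extern "C" {
+    fn vec_new() -> Val;
+    fn vec_push(vec: Val, item: Val) -> Val;
+    fn contract_data_get(key: Val) -> Val;
+}
+
+/// Look up a value under the composite key (a, b, c): five host calls for
+/// one piece of data, which is exactly the ergonomic complaint above.
+pub unsafe fn get_by_tuple(a: Val, b: Val, c: Val) -> Val {
+    let mut key = vec_new();
+    key = vec_push(key, a);
+    key = vec_push(key, b);
+    key = vec_push(key, c);
+    contract_data_get(key)
+}
+```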
And again, it's absolutely the case that if we measure this and it's miserable... I mean, the other thing is that I don't honestly think there are masses of I/O operations in the normal
+
+[19:00] contract path, right? I think you're only talking about a couple of point accesses per contract call anyway, so I'm not super concerned about that path. But if we measure it and it turns out to be expensive, we can absolutely revisit this and try to add fast-path or optimized versions of this. Graydon, do you think there could be contracts that will be vulnerable to moving footprints? You mentioned the situation where dynamic footprints are an inconvenience, or you have to do reconnaissance queries and they potentially could be out of date, but I'm wondering if there's an angle here where that actually makes the contract vulnerable, in the sense that one participant of that contract could prevent another participant from interacting with it. Yes, absolutely. As far as I can tell, this is basically always the case with concurrency control. If you have any kind of concurrency-control mechanism somewhere, you can create a starvation; you can starve
+
+[20:00] one party by just hammering on a contended resource. In this particular case, the contract developer has a fair amount of control over it, because they can change the granularity. So if a contract developer feels that this is a risk, or sees it happening, they can re-architect the contract to essentially sacrifice concurrency to get rid of that kind of concern. At the extreme end, your footprint is "the contract data": there's only one contract-data entry, everyone who talks to this contract always accesses the exact same contract data, and that means everyone knows exactly what their footprint should be. It's always just the contract data; there's only one element; everyone specifies the same thing, and they all get serialized. And so you can never have your footprint invalidated, because your footprint is always correct. So you can do that; if you find that's happening, that's just the worst case, right? So you
+
+[21:00] move away from that if you want more concurrency, but if you're seeing that people are able to, and actively are, exploiting some kind of concurrency-starvation situation, then you may have to move back towards it. And I don't personally know a way to avoid that. I think if we did any kind of dynamic locking, we would be in exactly the same situation, where someone could just flood the system with transactions that take a lock and deny anyone else the ability to make progress, and we would be in a worse situation, because with dynamic concurrency... the thing that's really good about static concurrency control as an approach is that you have a guaranteed sort of throughput: the things that you have scheduled will complete, one way or the other, within their allotted time slice. They will either finish or they will abort. And so your abort rates go up, but the system continues to run
+
+[22:00] in this particular strategy. The other strategy would be more like: we would give people the ability to drag the entire system down, so a transaction set would potentially slow down dramatically because people are contending on a hot resource.
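+*Editor's sketch:* the trade-off above shows up on the client side as a retry loop. With recorded footprints, contention surfaces as a failed submission that the user retries, rather than as a lock that stalls everyone else. All types and functions below are hypothetical stand-ins.
+
+```rust
+struct Tx;        // stand-in for a contract invocation
+struct Footprint; // stand-in for a recorded read/write set
+struct Receipt;   // stand-in for a successful result
+
+enum SubmitError {
+    FootprintMismatch, // a declared key diverged since the read snapshot
+    TooContended,
+}
+
+// Offline "reconnaissance" run against a read snapshot.
+fn record_footprint(_tx: &Tx) -> Footprint {
+    Footprint
+}
+
+// Placeholder: a real client would submit to the network here.
+fn submit(_tx: &Tx, _fp: &Footprint) -> Result<Receipt, SubmitError> {
+    Ok(Receipt)
+}
+
+fn submit_with_retries(tx: &Tx, max_retries: u32) -> Result<Receipt, SubmitError> {
+    for _ in 0..max_retries {
+        let fp = record_footprint(tx);
+        match submit(tx, &fp) {
+            Ok(r) => return Ok(r),
+            // A hot key moved under us; re-record and try again.
+            Err(SubmitError::FootprintMismatch) => continue,
+            Err(e) => return Err(e),
+        }
+    }
+    Err(SubmitError::TooContended)
+}
+```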
So there aren't a lot of free lunches in concurrency control, and I kind of feel like that's a natural trade-off. Did that answer the question? I know it's not a fun answer. No, yeah, I think that makes a lot of sense: the fact that the proposal really gives the contract developer a lot of control, and doesn't define the level of granularity they have to use. I like it; yeah, it's good.
+
+[23:00] Well, great, it looks like you've created one of the least contentious CAPs that has ever come to life. Thanks for that. Great, cool. So let's dial up the contention levels. John, I think this is probably the third iteration of smart contract interactions. So, for those of you who were here at last week's meeting, you might recall that we had a pretty big debate about this, and, in the interest of actually agreeing on something, I decided to just remove all the functionality from the proposal, which sounds a little backwards. But in the context of smart contracts you can kind of push all of these authorization questions down to contracts and let them do everything themselves,
+
+[24:00] and we had been kind of moving in that direction on [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md) anyway, with the introduction of the invoker-signature-none option and stuff like that. So, basically, at a high level, what's in this proposal: just like in the old proposal, there is a new operation... sorry, a new transaction type, and a corresponding envelope type, called invoke contract transaction, or invoke contract transaction envelope. This contains the normal stuff from before: source account, sequence number, fee. It contains the contract that you're invoking, you know, the ID, the symbol, the parameters. It contains the read/write set that you would need for [CAP-53](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md), as Graydon was just talking about. And that's pretty much it. So, how does one actually use this? There are some examples in the CAP, which are pretty instructive about what the universe would look like if we actually
+
+[25:00] did this. There's a whole example section where I hacked up two versions of an ERC-20-type contract, but they look quite different from your normal ERC-20, because there's not really a reliable message.sender that you can use in this context. Basically, the only other thing that's here is just a few host functions that are useful for actually doing some of the things we discussed last week. So there's some access to thresholds from accounts; there's access to getting the signer weight for an account by signer key (kind of hard to say); and I added a verify-ed25519 function, which I think is also in [CAP-51](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-03.md), which is being written at the same time. And that's pretty much it. So, any questions about this?
+
+[26:00] So, I'm just trying to get into this: can you talk a bit about the implications around accounts? We were talking about this a bit last week: what does this mean for classic multisig accounts on the smart side? The beauty of it is that the proposal means basically nothing for those things, because the contracts get to make their own decisions.
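+*Editor's sketch:* the transaction shape John describes might look roughly like the following. Field names and types are invented for illustration; the CAP defines the real XDR.
+
+```rust
+/// Hypothetical rendering of the new "invoke contract" transaction.
+struct InvokeContractTransaction {
+    source_account: [u8; 32], // the normal stuff from before...
+    seq_num: i64,
+    fee: u32,
+    contract_id: [u8; 32],    // which contract to invoke
+    function: String,         // symbol naming the contract function
+    parameters: Vec<Vec<u8>>, // serialized host values (ScVals)
+    read_keys: Vec<Vec<u8>>,  // CAP-46-5 footprint: keys read
+    write_keys: Vec<Vec<u8>>, // CAP-46-5 footprint: keys written
+}
+```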
So, for some context: if you scroll down almost all the way to the bottom, the last example is a simple token based on account signatures, and this builds on the classic Stellar multisig mechanism. It basically works exactly the same, with two exceptions: no pre-signed transactions here... oh, sorry, no pre-auth
+
+[27:00] transactions, and no hash-X signers. But it works exactly the same, and it works at medium threshold, and everything is kind of exactly what you would expect. Right above that, there's another example that uses the single-key version that I was proposing as the invoker signature last week. And so this framework basically lets contracts build whatever they want. If you want some kind of support for Stellar multisig, that'll really be up to contract developers and ecosystem standards and stuff like that. My intuition is that those things won't really materialize, because they're not efficient structures on the blockchain, but they might materialize case by case. Something that immediately jumps out at me with these examples is that it may be difficult to write these functions,
+
+[28:00] some of these... sorry, I'm looking at the simple token based on account signatures example you were just referencing, and the second code block has a check function; it says "internal function." Is that something that the network is providing, or is that an internal function that the contract provides? That's a contract function that's not exported. So, I can imagine... I've had to write code like this for our SDKs when we were implementing SEP-10, yeah, I think it was SEP-10. One thing that is quite difficult to get right is iterating over a set of signatures for a message and determining a set of weights, because there are different things you have to do: like, you have to make sure that, if somebody includes the same signature twice, you don't use it twice to get double the weight, different things like that. So do you think that, by going with this approach,
+
+[29:00] expecting people to implement their own authorization, we're increasing the chance of foot guns, where people are going to implement what they think is Stellar multisig authorization verification, but it doesn't actually exactly line up with it? That's definitely possible. I mean, my kind of ideal universe here, in the sense of what I hope people would do, is probably somebody would deploy one contract that has a unified key structure, a unified signer scheme. Basically, you pass it some kind of opaque blob; the beginning of the opaque blob is a discriminant saying, hey, what kind of signature is this: is it a single-key ECDSA, is it a Stellar multisig, is it some other scheme that I'm not thinking of, some kind of quantum-resistant scheme, who cares. And basically the entire ecosystem relies on this contract. Or, sorry, another example would be... I
+
+[30:00] know, Leigh, you had requested aliases: we could have a single ed25519-alias version, all of that implemented in one contract that everybody kind of relies on as an ecosystem standard. You don't have to rely on it, but if you do, you kind of get compatibility across the entire universe for free. That's what I would hope would happen, instead of everybody rolling their own. But, yes, at the very worst case, everybody rolls their own, and,
if you don't know how to roll your own (for example, I didn't account for the repeated keys in this example, working too fast), you can get yourself in trouble. And just to follow up on that: part of why I'm a big proponent of single-key signatures, and doing everything else with, you know, secure multi-party computation, is because there are a lot fewer ways to blow yourself up on chain. A single signature is easy to verify. In fact, my argument would generally be that if you want to write a good contract that is really safe and easy to audit, you should use the simplest
+
+[31:00] authorization scheme possible. All right, can I ask a follow-up question? I'll be honest, I'm a little bit behind on this entire aspect of the interactions. If you're dealing with a case where people do use the simplest and safest approach... suppose you're a smart contract author who's trying to be conservative, and you don't want to do anything too elaborate, and you're using this interface. Am I correct in reading this that you're probably not going to have to include an awful lot of code in your contract to make this work? Is that correct, that the number of calls you have to make to host functions is not particularly huge? You're talking about the case of a single ed25519, right? That's right.
+
+[32:00] Yeah, in that case it's very simple. That example is the first one, and the code is basically: check the nonce, hash your message, do an ed25519 host function call, and that's pretty much it; everything just traps if the wrong thing happens. You could probably write this in five lines, three or four host function calls. Pretty lightweight, all told. And it's pretty hard to escape some of these parts no matter what you do: at some point, if you're going to do the authorization on-chain, you probably need to do at least one ed25519 signature verification, or an ECDSA signature verification. So it probably could be a little lighter than this, but probably not significantly lighter. Well, I guess, just being my typical trying-to-shave-things-down self:
+
+[33:00] this feels to me like even the minimal version is a blob of code that will have to get stapled onto every single smart contract, and they will all run it, even if they all wind up being conservative and take your advice. And this is all in-VM rather than extra-VM; there's no way for them to say "fast path: just do this conservative thing." So, yes and no. Because even though I think everybody will be conservative, I still think the ideal universe, with everybody being conservative, is them all using a single contract that implements all of this, so that you don't end up with the same code cloned everywhere; you just have a cross-contract call. You probably think that's worse than it is from a performance perspective, but it doesn't end up with 10,000 or a million copies of the same code everywhere on the blockchain. Plus, the second part of it is that, if there is a lot of ecosystem adoption around some kind of standardized signature verifier, we could always
+
+[34:00] deploy a native version of that that's super fast. I see.
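+*Editor's sketch:* the "conservative" path just sized above, roughly five lines and three or four host calls: check the nonce, hash the message, verify one ed25519 signature, and trap on any failure. The host functions are hypothetical stand-ins for the CAP-52/CAP-46-3 surface, not real APIs.
+
+```rust
+extern "C" {
+    fn host_check_and_bump_nonce(addr: u64, nonce: u64); // traps on replay
+    fn host_sha256(payload: u64) -> u64;                 // hash of a host value
+    fn host_verify_ed25519(key: u64, msg: u64, sig: u64); // traps if invalid
+}
+
+/// All arguments are opaque host-value handles (CAP-46-1 style).
+pub unsafe fn require_auth(addr: u64, nonce: u64, payload: u64, key: u64, sig: u64) {
+    host_check_and_bump_nonce(addr, nonce);
+    let digest = host_sha256(payload);
+    host_verify_ed25519(key, digest, sig);
+    // Control reaches here only if every check passed; otherwise the host
+    // trapped and the whole invocation failed.
+}
+```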
So... so you are... I'm not... I want to make sure I'm not promising that that's ever going to happen. No, I'm not hearing a promise; I'm just trying to understand what level of code reuse you're assuming is going to work. And also, calling a third party to do your authentication for you definitely gets us into the question that is the other thing we're going to be discussing today, which is mutability of contracts, and versioning your dependencies. I think if there's anything someone is going to not want to trust to a third party, it's the authentication path, unless they're 100% sure that third party is immutable and is the code they audited the last time they read it. I think another option is: it could be a cross-contract call to this one contract that's living
+
+[35:00] in one place (so we're talking about reduced wasm size), or it could also just be library code that everybody's sharing. So, people understand there is no mechanism for library sharing besides including the code into the contract, right? Yeah, that's right. So obviously there's no space saving; all these contracts are going to have the same code within them. But, addressing the concern of people implementing these things correctly: if everyone's using this common piece of library code that has either been audited or that people generally have more trust in, then you don't really have to worry so much about the mutability concern, because they're choosing to build it into their contract at build time. There are some trade-offs there.
+
+[36:00] I think it would generally be a thing that would occur, that people would provide some of these very standardized auth functions, like single-key, or based off of Stellar accounts, in a library that you can use. But if you want a stateful system (Leigh, you were talking about aliases, and that's why I keep coming back to this), the stateful system is only really useful if your state lives in a centralized place that people can rely on. It would be really annoying for me to have to go and set my alias in every single contract that I use. I could, but that just seems really irritating. I think people would much prefer a system where, if that's an option, there is some global contract where that state lives, basically. I guess... well, I guess people could... you could have a really simple contract that's just an aliasing contract, and you could have library code that uses that. I don't know; there are a lot of options here.
+
+[37:00] Yeah, this is to me super interesting. How do we think about that? This is like the smart-wallet type of situation, where the smart wallet is kind of shielding, or separating, whatever key you're using at that time from your persistent ID on the network. I wouldn't necessarily call it a smart wallet; it's kind of tangentially related, I guess. What I'm really saying is: I sign using key x, but my public key always stays y, and I can change x to x-prime or x-triple-prime, but my public key always stays y. Yeah.
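+*Editor's sketch:* the "sign with x, keep address y" idea above, rendered as a tiny registry contract. The in-memory map is a stand-in for contract storage, and all names are invented for illustration.
+
+```rust
+use std::collections::HashMap;
+
+/// Hypothetical aliasing registry: stable identity -> current signing key.
+struct AliasRegistry {
+    current_key: HashMap<[u8; 32], [u8; 32]>, // identity y -> signing key x
+}
+
+impl AliasRegistry {
+    /// Rotate y's signing key from x to x'. A real contract would first
+    /// require a valid signature from the current key before allowing this.
+    fn rotate(&mut self, identity: [u8; 32], new_key: [u8; 32]) {
+        self.current_key.insert(identity, new_key);
+    }
+
+    /// Which key must have signed on behalf of identity y right now?
+    fn signer_for(&self, identity: &[u8; 32]) -> Option<&[u8; 32]> {
+        self.current_key.get(identity)
+    }
+}
+```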
Because you can have an account identifier. And then you can attach other keys to it. And so Stellar accounts provide these two concepts aliasing. And then multisig and I think you know there's been the concern address presented, that you know we shouldn't just implement the multi-sig, that exists on classic over on smart. Because there's a lot of trade-offs with doing, that. But the aliasing alone is like a feature, that I think, that's worth us exploring like what will, that look like. Because it allows people to do things like rotate their keys or have multiple devices or using the same address and. If I understand correctly John you're saying, that aliasing + +[39:00] capability could actually just be a contract yeah, that's exactly what I'm saying any other questions on [CAP-52](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0052.md) I think. Because it's. So fresh we probably need a bit more time to get into the weeds John could you elaborate a little bit on how you see replay prevention happening. So I see in the CAP, that there's this announce, that concept exists yeah could you elaborate a little bit with how you see contracts would typically do, that yeah I'm happy to do, that. So this whole replay prevention thing + +[40:00] gets kind of annoying in this proposal, that's one of like the big downsides of this approach, that I pointed out in [CAP-50](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0050.md). When I said like why we shouldn't do this, which is basically like every contract ends up implementing their own replay prevention. When wherever it's needed and this means, that like things get pretty annoying fast for example like on Stellar you like consider stuff center today you might like submit a transaction and you know you have replay prevention on it. Because the sequence number and you also have like you know a deadline effectively you know the max time and you know, that. If you get to, that point everything is done it can't execute or it has already executed. But like. If you want, that functionality here you also have to implement the deadlines in your contract and all these other things and everything just gets kind of annoying fast basically. Now again same kind of thing, that i + +[41:00] was just talking about you could actually. Because like in this approach. Because everything is done by signed messages you can actually delegate all of this to like some other contract, that deals with it. So you can imagine implementing Stellar's times you know time downs and ledger bounds and all, that other stuff in a contract and reusing it. If you still want it or you can rebuild in your own contract as well. But but basically there's no generic nonce here and the CAP goes into a little bit of detail about like why you can't use the sequence number and they're like my original proposal here actually had an example where the sequence number was like the transaction sequence number was used as an us. But it had a couple like kind of annoying details about it specifically like such a contract is like really vulnerable or such a design is really vulnerable to what's it called confused deputy + +[42:00] problems and. If you try to fix the confused deputy problems. Then it becomes impossible to use the sequence number as a replay prevention tool. 
So there are a lot of trade-offs here. Basically, I can imagine an argument where we just say, hey, people should be cognizant of their confused deputy problems, etc., and we make that an option. Again, I don't know if I would personally feel good about that, because confused deputy problems are a very difficult foot gun to deal with; I think they're an easy thing to overlook. So I don't know, but basically yes: every contract is building its own replay prevention, or relying on it from somewhere else on chain. Got it. I think one nice side effect of not exposing the transaction source account and sequence number to the contract is that contracts are getting really set up for that common relay + +[43:00] pattern that we do see in other ecosystems, where people design their contracts so that the message that's getting signed, the contract call that's getting signed to be used on chain, is independent of the participant who's actually submitting it and paying the fee, and that participant could be a third party that's playing the role of relayer, making sure that the transaction gets onto the network. So in some ways it's nice: it sort of sets up contracts to really work well with that, because if a contract is written to use the source account, you would then have to modify the contract to make it work with a relay. Yeah, definitely. You mentioned this to me, I don't know, a week ago or something, and that idea really stuck in my head when I was writing this. So I totally agree with you; that's a huge advantage of this design. + +[44:00] Something that I've heard, I think maybe Graydon raises as a concern, is that if we encourage people to write their own replay mechanisms, which I don't think we can actually really get away from, so maybe it's not worth having this conversation, but if we encourage people to write their own replay mechanisms, people may write replay mechanisms that are really inefficient, storing-data-on-chain-forever type of inefficient. Do you think there's anything we can provide, maybe some utilities in the SDK or even in the host functions, that might help people write replay prevention mechanisms that are more efficient, that use the ledger in a less aggressive way? I haven't given that too much thought, + +[45:00] honestly. But my general perspective on this is that it costs money to use the blockchain and people will be incentivized to do things that cost less money. So basically, if there's a reason to do a really inefficient replay prevention mechanism, because it makes the rest of your contract much simpler, or maybe it's the only way to even do it, then I think people will do that. But in the absence of that need, I think people will favor the super simple mechanism that's cheap. Whether I can guarantee that, I don't know, and whether we can provide some utilities, I'm not really sure. I mean, a really simple replay mechanism like a sequence number is basically: you have a map, you look it up, you check, you increment, that's it. It could be hard to make it much simpler. We could provide some library functionality that literally does that exact thing, but the thing is, if you have an account that already has per-user data, you would probably want to wrap the + +[46:00] nonce in with the other per-user data.
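As a sketch of the "map, look it up, check, increment" mechanism described here, assuming per-signer nonces and modeling ledger storage as an in-memory map (names are illustrative, not SDK API):

```rust
use std::collections::HashMap;

/// Sketch of the simple replay-prevention mechanism described above:
/// each signer has a nonce; a signed message must carry the current
/// nonce, which is then incremented so the same message can never be
/// replayed. Ledger storage is modeled here as a HashMap.
struct ReplayGuard {
    nonces: HashMap<[u8; 32], u64>,
}

impl ReplayGuard {
    /// Check the nonce supplied with a signed message and consume it.
    /// Returns false (and changes nothing) if the message is a replay.
    fn check_and_increment(&mut self, signer: [u8; 32], supplied: u64) -> bool {
        let current = self.nonces.entry(signer).or_insert(0);
        if *current != supplied {
            return false; // stale or future nonce: reject
        }
        *current += 1; // consume: the same signed message can't run twice
        true
    }
}
```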
And then the helper is not actually helpful in that case. So that's kind of the main perspective there. But I have been thinking in general, and this is a bit of an aside, that it would be really helpful if the SDK provided some types. For example, there's the ScVal map type, which is a map in the sense of a conventional map. But I've also been thinking that sometimes you want to look at the data as, I have a bunch of data stored in different ledger entries, and it would be cool if there was a map type that did that very easily, instead of having to use the contract-data put and get host functions, etc., in [CAP-53](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md). So maybe there's some interplay between that and a replay prevention mechanism that we could learn from. Yeah, I think all those things, as we develop even basic applications, will factor + +[47:00] this type of basic functionality into some traits that people would just use. I'm not actually too concerned about people having to write it, because we are going to write it. For more different types of replay prevention, I think that's kind of the nice thing about this proposal: I know in the past we discussed potentially doing very different things, like where you have those ephemeral types of things that only work in a specific time period, so that you could in theory replay within a specific window, but in some designs that's actually acceptable, and then you end up with much simpler client-side code. So yeah. + +[48:00] Can I ask a dumb question? You have a function in here called nonce_of; how does that work, what does it do? It's just a contract-internal function, I don't know if the comments emphasize that; it's not a host function. Basically it just reads the data: it reads the ledger entry and finds the nonce in it, which would probably just be a single integer stored in a ledger entry. So the nonce in those cases is user-maintained data associated with an address? Yeah, exactly. Contract-maintained, I want to say, but yeah. Okay. In general, I mean, I'm a broken record, I don't want to waste too much time with this, but I am extremely nervous about suggesting that users roll their own authentication mechanisms. I think this + +[49:00] is just asking for disaster. But I understand that we've been around this a lot, so you don't need to convince me. Providing the really simple authorization mechanism, that's the only approach I would strongly favor. This just feels like it's going to be a disaster: you're going to have people who completely fail, because this code path, if you get it wrong, is game over for everything, and it's so easy to get it almost right, and your tests pass, and you deploy it, and you think everything's fine, and then it's not. So I would love to not have users writing this code. But okay, obviously I think we need to keep discussing the value in this proposal. In the meantime, we have 10 minutes + +[50:00] remaining, and I'd love to hand it over to Siddharth to talk a bit about the changes to the smart contract life cycle and potentially any remaining open questions that we need to answer. Yeah. So the most recent change was a small one about how the contract ID, which is now a hash, is calculated. We can look at that change; it's pretty simple: if it's created from a transaction, you hash the source account and a user-provided salt, and if it's a contract created within another contract, you hash the parent contract ID and a salt provided by the contract.
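A sketch of the shape of that derivation. The exact preimage layout (XDR encoding, field order, domain separation) is specified in the CAP; this only illustrates the two cases, and assumes SHA-256 via the `sha2` crate:

```rust
use sha2::{Digest, Sha256};

/// Shape of the contract ID derivation described above, for a contract
/// created directly by a transaction: hash of source account plus a
/// user-provided salt.
fn contract_id_from_transaction(source_account: &[u8; 32], salt: &[u8; 32]) -> [u8; 32] {
    let mut h = Sha256::new();
    h.update(source_account);
    h.update(salt);
    h.finalize().into()
}

/// And for a contract created by another contract: hash of the parent
/// contract ID plus a salt provided by that contract.
fn contract_id_from_contract(parent_contract_id: &[u8; 32], salt: &[u8; 32]) -> [u8; 32] {
    let mut h = Sha256::new();
    h.update(parent_contract_id);
    h.update(salt);
    h.finalize().into()
}
```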
If there are any questions there, we can talk about it, but I think the more interesting things are these two other points I want to bring up. One is mutability: do we plan on adding it? Initially, the CAP right now does not have mutable contracts, but the question is, should we leave that question open for the future, + +[51:00] or should we just say contracts will always be immutable? And the second thing is, we allow contracts to be removed; the CAP has a host function to remove the contract code entry. So I think we can start with the immutability question. If we do allow mutable contracts in the future, a big question was how we deal with versioning, and I was taking the approach of letting the contracts deal with it. So if contract A calls contract B and contract B is mutable, contract A may just trust contract B's creator, right? I think Graydon had some issues with this. Graydon, do you want to talk about this? Well, I keep coming back to the general sense I have that + +[52:00] cross-contract calls are something like dynamic linking or package dependencies in software in general. Smart contracts are software, and this is a general software versioning problem, and software in general has natural tensions around versioning: people frequently want to lock to particular versions, but they also frequently want to get the newest latest, and there's a concept of a newest latest that is not compatible, which is often expressed in major version numbers or separate APIs or separate names for things. And I'm concerned that we are not reproducing any of the infrastructure that would be normal to support points on that natural tension. So I think it is worth trying to provide some of the + +[53:00] building blocks that people are going to provide themselves anyway, because it's bad enough to have versioning; it's worse if there are multiple versions of the versioning system and you have to opt into different versioning regimes depending on which ecosystem you're adhering your contract to. I understand there's this natural tendency in our conversations here to try to push everything to the ecosystem and let the ecosystem figure stuff out, let them develop patterns in the smart contract space that just solve the problem however contract users would like them to. But that is not actually as much of a solution as I think it sounds; it really strongly introduces the possibility of totally incompatible regimes developing in parallel, or inadequate regimes that miss some important aspect of the design because they were cobbled together in a hurry. So I wouldn't mind us spending enough time to be able to provide the basics, which is: I want to pin to a version, + +[54:00] I want to pin to a major version and only get security updates, or I want to follow any new features and additions that people add, including modifications, upgrades, whatever. I feel like there's got to be a future where those are things that someone's going to provide, and maybe it's our place to potentially furnish them, maybe it's not, but that's what it feels like to me.
But at our layer, is it just having a way to have immutable versus mutable contracts, where the versioning is metadata, basically, and it's up to you, when you write a contract, to decide how you want to use this metadata? Okay, but when you make a cross-contract call, at the moment a cross-contract call only identifies a contract ID, right? It does not say call this + +[55:00] but give me version five or whatever; there's no version information in cross-contract calling right now, which means we're essentially always dynamically linking, either to something immutable, exactly the same thing, or to something mutable, whatever the person updated that contract with. The thing is, there are a bunch of things that feel like, if we do that at the protocol layer, we're kind of baking those in. For example, if you're talking about different versions of a specific contract, there are a bunch of questions, like: you have a 1.0, but maybe the 1.1 is actually deployed by somebody else, right? It's not even the same author. How do you deal with those types of situations, + +[56:00] like who do you trust to be 1.1? I don't think we have the notion of an actual organization or whatever that is the publisher of a contract. Yeah, I agree we don't have any notion of that; I'm advocating for us to come up with a notion rather than saying... But with that notion, I don't think you can necessarily come up with a one-size-fits-all in that space, because it's not like in, let's say, normal software, where you have a company and they ship their thing and that's kind of it. Also, here, if I depend on a specific version, my installer basically is just going to cause the OS to grab that version + +[57:00] that I depend on. Only, in a blockchain type of situation, the cost of keeping that other version around actually falls on whoever deployed that contract. So if you say, oh, I want to pin all versions, everything is always pinned, I think the implication there would be, well, okay, the publisher now has to keep around all versions of their contract, which is kind of weird. Okay, so could you make a concrete proposal here? Are you talking about, you would like all contracts to be, not immutable, but mutable, so you can mutate them, and if I ever want a pinned + +[58:00] version, I vendor it. Is that what you're saying? Not necessarily. Yeah, you could always vendor, of course; that's a solution, and I'm okay with that. I just think we have to think through the scenarios, is what I'm saying. And if what we're saying is we're not going to give any thought to the scenarios whatsoever, and then we're going to tell everyone to proxy every single call they make, because that's the only place they're going to have any ability to enforce policy, then I think we're losing a good chance to shape the system. I think it's more like: what are the things that you get with tooling that we can provide, versus things that are actually baked in at the protocol layer? I think that if you have a way to say...
Because you'll also have the other problem of + +[59:00] contract discovery on the network: which contract do I trust, versus the ones I partially trust, versus the ones I don't trust? There are some where you basically trust that contract such that even if it was mutable, you trust that they are not going to change it, or you're actually fine with them modifying it; in other situations you do want to pin, because maybe it's a stability thing or whatever, and you have other problems there. So the mutability aspect, I think that's actually a property of the contract, so that you basically know: can I even directly depend on this from my contract, or is it potentially going to change under me? But then you have a different problem, which is kind of like + +[01:00:00] when you have your manifest files, when you decide that you're going to pin your dependencies in your own program. That decision is something where I think we just have to develop the right tooling, but it's not something the network should have an opinion on, not even the schema of how you pin things. Like I said, I guess I'm saying that the network has to provide whatever is necessary to support it. So we come up with what the network needs to support it, and I'm not seeing that developing in our conversations. We talk about how bad things can happen, and then we throw up our hands and say obviously we can't solve it. No, we don't have to fight this one; I agree, we just try + +[01:01:00] to figure out what the pattern is, and then figure out what the network needs to actually support it, right? I think we need to solve those problems. What I'm saying is that we don't need to, because the things that I've heard so far are in the context of a CAP where we would kind of force a specific model at the protocol layer. Oh, and I see a raised hand, who is that? I think it needs to be in a CAP; it needs to be in the design rationale for this CAP. If we're saying that we're going to provide this limited set of functionality and you can go and do whatever you want, we still need to provide in the design rationale: this is how we expect it will solve this problem of versioning. And I can already see there are some gaps here. If we say that + +[01:02:00] all contracts are immutable, with an i, and you're going to do versioning yourself: how is data migration going to work? Because right now we have [CAP-53](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md) and contracts can't access other contracts' data. So how is contract v1's data going to be migrated over to v2? And then, if we say that v1 and v2 are going to coexist at the same time, how do contracts do that? It seems rather complex. Yeah, and actually in the context of [CAP-53](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md), I agree. Also, you would change your ID: if your ID on the network is your contract, if the contract is your ID type of thing, you don't want to be changing your ID
if you upgrade that contract. + +[01:03:00] We got a request to speak from the audience. I don't know how to take them... all right, it's an audience question. Hello, can you... Yep, we can hear you. Great, thanks. Appreciate the conversation, and glad I was able to hop in. I'm going to keep it short, because it's going to be slightly off topic from what you're discussing. I am a week away from dropping a tokenized community with a coin that I'm bringing over from Rally onto Solana, and I want to develop on Stellar, and I've looked up online that there are some people that help you launch. I'm just trying to understand, based on what I know, moving forward, who can I connect with, so that I + +[01:04:00] can understand the liquidity and how the Stellar network works, and how I can make sure that same liquidity, which I'm probably going to lose, can transfer over to the Stellar network, and really make sure that I look at all aspects of where I'm launching, so I don't have to wrap and do it differently in a month. Do you know what I'm saying? Yeah, Cassandra, I appreciate the question. This specific conversation is about a very specific technical topic, and you can go to one of the other channels, support for example, ask that question, and I'll be happy to help you there. Okay, thank you. I'm just looking for a personal contact; I just can't find anyone, and I've left a couple of messages in the chat, but I'm not sure who to talk to. So, who was just speaking? And then I'll tag you. Okay. So we're over time, and I think + +[01:05:00] there are some really interesting discussions here that we probably need to continue, either on the Jump Cannon dev channel here on Discord or on the mailing list. So I think we should go with that. Thank you all for joining and tuning in, and have a great rest of your day.
diff --git a/meetings/2022-06-02.mdx b/meetings/2022-06-02.mdx new file mode 100644 index 0000000000..9c481b97b5 --- /dev/null +++ b/meetings/2022-06-02.mdx @@ -0,0 +1,168 @@ +--- +title: "Asset Interoperability and Footprint Planning" +description: "A Jump Cannon protocol discussion focused on smart contract footprint planning, CAP cleanups, and ongoing debates around asset interoperability between classic Stellar assets and Soroban." +authors: + - david-mazieres + - graydon-hoare + - jonathan-jove + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: + - legacy + - CAP-46-1 + - CAP-46-2 + - CAP-46-5 + - CAP-49 + - CAP-52 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This discussion continues Project Jump Cannon work with a mix of technical housekeeping and deeper design exploration. The first half of the session focuses on simplifying and clarifying Soroban’s core data model, including removing unused concepts from earlier CAP drafts and tightening how contracts declare their interaction with ledger state. + +The conversation then shifts to one of the most debated open questions in Jump Cannon: asset interoperability. Participants explore what it means for classic Stellar assets to work safely and predictably in smart contracts, balancing ecosystem compatibility, issuer expectations, developer ergonomics, and long-term protocol flexibility. + +### Key Topics + +- CAP-46 (now CAP-46-1) cleanup: + - Removing the unused `Box` object from the value/object model. + - Reducing cyclic references and simplifying host/guest data structures. +- Naming and type clarity: + - Reverting to `u63` as a concise representation for non-negative 63-bit integers. +- Contract code and data representation: + - Merging contract code and contract data into a single ledger-entry model. + - Treating code as data while keeping contracts immutable during execution. + - Open questions around future mutability, upgrades, and lifecycle semantics. +- CAP-53 (now CAP-46-5) footprint planning: + - Introducing an explicit `footprint` type to replace implicit read/write sets. + - Declaring all ledger keys a transaction intends to access for determinism and parallel execution. + - Decision to allow full ledger keys to preserve future interoperability options. +- Asset interoperability debates: + - Defining the right balance between no interop and full classic-feature parity. + - Wrapping versus direct access models for classic assets. + - Preserving issuer controls, compliance expectations, and existing infrastructure. + - Whether smart assets and classic assets should look identical from contracts. + - Lessons from claimable balances and opt-in feature expansion. +- Ecosystem considerations: + - Impact on wallets, exchanges, custodians, and issuers. + - Desire for standardized authorization paths to avoid fragile, custom cryptography in contracts. + - Tension between simplicity, performance, and long-term extensibility. 
+ +### Resources + +- [Project Jump Cannon: Choosing WASM](https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm) +- [CAP-0046-01: WebAssembly Smart Contract Runtime Environment](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md) ([Discussion](https://groups.google.com/g/stellar-dev/c/vkzMeM_t7e8)) +- [CAP-0046-02: Smart Contract Life Cycle](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md) +- [CAP-0046-05: Smart Contract Data](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md) +- [CAP-0049: Smart Contract Asset Interoperability with Wrapper](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) +- [CAP-0052: Base64 Encoding/Decoding](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0052.md) + +
+ Video Transcript + +[00:00] The changes that we're discussing today, and what we're focused on now, relate to Project Jump Cannon. It's a future protocol upgrade; these changes would be included in it, and Project Jump Cannon aims to bring smart contracts to Stellar. We've actually modularized a lot of these changes, and so there's a whole slew of CAPs that are related to Jump Cannon, CAPs 46 through 53. Since this is a pretty technical discussion, if you want to follow along, I urge you to take a look at those CAPs. If you're interested in joining the discussion, I urge you to join the Stellar dev mailing list, and if you want to also follow along with Jump Cannon development more generally, you can do so here in the Jump Cannon channel and also in the Jump Cannon dev channel. So today, again, we're focused on Jump Cannon related CAPs. There's a lot on the agenda, a lot of small stuff, but there may also be some bigger issues that we're talking about. As you're listening, if you have questions, the best thing to do is to put them in live chat in text; I'll do my best to + +[01:00] monitor it. The goal here is to actually have substantive discussion that moves forward some of these changes, that answers questions and allows continued development. So we may or may not have time to address the issues in live chat, but if we don't do it now during this meeting, we'll definitely take a look at that channel afterwards too. So feel free to put your questions or thoughts there. And that's the end of the intro. Everybody here? It looks like we got a full house. Okay, cool. So I guess to start off, we have an agenda, and I guess we can just sort of start with what's at the top of it. I also know that there are some questions about interoperability that we may want to address. I think maybe we start with the 46 removal-of-box agenda item. I put a bunch of agenda items on here... can you hear me, + +[02:00] everyone, hear me? Yeah, we can hear you. At least I can hear you. Yes. Okay. Yeah, I put a bunch of agenda items on here, but they're all really straightforward, so I can just breeze through them really quickly. Boxes in [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-01.md): it's an object that contains a value. I think this is actually just an artifact of the notion that we might have had mutable objects, because as far as I can tell it's basically indistinguishable: as long as we have immutable objects and we don't have reified pointers, there's no way to differentiate an object that contains a value from just the value. So Leigh suggested that we get rid of this, and I'm happy to get rid of it until we discover some reason to bring it back in the future. Going once, going twice. I guess I have maybe a question. Yep. So does that mean, if later we decide to have mutable host objects, they would just have to be boxed? That's right, that's it, we'll just bring it back. So it's actually + +[03:00] completely backward, I mean forward, compatible. Yeah, we would just bring the object back in the future; it's just that since we don't have any reason for it right now, we might as well delete it. Yeah, makes sense. Okay. So, as a part of deleting this, I want to get rid of the cycle; this gets rid of our only cyclic reference in the structure, and I want to get rid of the option on the box.
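A simplified (and purely illustrative, not the actual XDR) rendering of the cycle being removed: a value can refer to an object, and a `Box` object referred back to a value, which was the structure's only cyclic reference:

```rust
/// Simplified rendering of the value/object model discussed above, not
/// the real XDR definitions. With immutable objects and no reified
/// pointers, `Boxed(v)` is indistinguishable from `v`, so the variant
/// can be dropped, and dropping it removes the only cycle.
enum ScVal {
    U63(i64),
    Object(Box<ScObject>), // heap indirection only to size the Rust type
}

enum ScObject {
    Binary(Vec<u8>),
    // Boxed(ScVal), // removed: ScVal -> ScObject -> ScVal was the cycle
}
```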
We definitely can bring this back later on, but if we do bring it back, we should put a little bit of effort into making sure that xdrpp, the C++ XDR lib, can do non-optional cyclic references, which there is already a plan for. Just calling that out. Yeah, it's implemented in C++20, so either we can move stellar-core to C++20 or we can backport it to C++17. Yeah, that's a somewhat bigger kettle of fish. But we have a workaround anyway: if we really + +[04:00] need to bring it back, we can move to an optional box like this. But this is motivated by getting rid of the option, so that'll actually make a lot of the on-the-wire stuff four bytes smaller. Okay. Second issue, which is very minor: we renamed u63, like two weeks ago, into "positive i64" to be a little bit clearer, because 63 is a weird number and everyone who sees it is like, what is that, why should there be a 63-bit number? It's actually just a 64-bit number that happens to be positive. And John pointed out that zero is in there, and depending on which mathematical tradition you come from, zero is either both positive and negative or neither positive nor negative, and some people believe non-negative is the correct word to use when you're including zero in a set of numbers that includes zero and all the positive numbers. I don't personally care; I'm perfectly happy to have the word positive in there, and I think it's reasonably easy for people to understand. I could also put in non-neg or something like that. But terminology-wise, I've got you all here, and we're basically going to merge this right now, so please commit to a + +[05:00] terminology. Does anyone have a preference? I think non-neg is fine if that's more accurate, but I don't really care. I personally think u63 was fine, but yeah, non-neg is fine too. I don't care either. I like u63 personally: it's short and it says what it means. Literally everyone who sees it has this weird sour note in their mind and is like, why is that 63? What is wrong with you? 63 is not a computer number, 64 is a computer number. But that is actually the correct reaction, because it is a weird type, except that it projects to and from i64: the type that you can convert to and from is i64, which is actually why it's i64 and not u64. That's a symptom of the language we're using, too: if we were writing contracts in Zig, u63 would actually make sense, it'd actually be a thing. I mean, the thing is, I don't think the weird reaction is to the name, I + +[06:00] think the weird reaction is to the concept, but then in context it makes sense, so why not just pick the more accurate name? This seems like a mega bikeshed thing. It is a mega bikeshed. I'll go back to u63 if everyone wants u63, but I will not entertain another comment on this after this meeting. So this is it. Yes, thank you. I mean, it's like boolean, right? A boolean is not one bit in most languages; if you have a variable like that, it's already the same thing. So I'm fine with... actually, I prefer u63 as well, because it actually tells you what it is. Everyone wants u63. Okay, we're going back to u63.
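For readers following along, an illustrative newtype capturing what `u63` means: a value that converts to and from `i64` but is guaranteed non-negative, so it always fits in 63 bits. The type and method names are hypothetical.

```rust
/// Illustrative `u63`: an i64 constrained to be non-negative.
#[derive(Clone, Copy, Debug, PartialEq)]
struct U63(i64);

impl U63 {
    /// Reject negative i64s; everything else fits in 63 bits.
    fn new(v: i64) -> Option<U63> {
        if v >= 0 { Some(U63(v)) } else { None }
    }

    /// The projection back to i64 that the discussion mentions.
    fn as_i64(self) -> i64 {
        self.0
    }
}
```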
Next issue: [CAP-47](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md) and [CAP-53](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md). We noticed partway through last week that, assuming we want some kind of mutability of contracts, which apparently we kind of do, or even if we don't want mutability of contracts, we still want a way to load a contract from the middle of execution, because we want to be able to call them. And so, at a representation level, + +[07:00] the XDR that represents ledger entries, with separate ledger entries for code and data, seemed possibly to be overkill. So we talked about it and came to the conclusion that it would probably be tidier to merge them. In the implementation we actually had pages and pages of SQL code for both of these types, and they're pretty much identical; Siddharth and I worked through this stuff, and they really only differ in one field, which is whether there's an additional sub-key. So we thought it might be nice to just merge those two, so there's a single type of contract-related entry: they all have a contract ID and a sub-key, and the sub-key has one magic value carved out. We have lots of places to carve out magic values; we can just carve one out of the ScStatic value set and have a designated key, and all it means is the owning contract's own wasm code, and then we just store the binary as an ScBinary type. + +[08:00] And so we implemented that, and it seems to work fine. I have not updated the CAP yet; I want to run that by everyone and make sure everyone's okay with it. It leans us a little bit towards mutable contracts, but I believe a lot of our design discussion has been drifting in that direction anyway. It does not wed us to that; we could also special-case it and just prohibit writes to that key. But it sort of has that tendency of treating code and data as the same thing, which at some level they are. So, I spoke to Siddharth about this yesterday quite a bit in the afternoon, and one of my main concerns around mutable code, and this is much more about the mutable-code aspect than about the merging, the merging I care less about, other than the fact that it makes code default-mutable and we now need to opt back out of that, which I'm about to explain: you can't just say the code is mutable without having some rules around what that means, and we have not successfully agreed on what those rules are. + +[09:00] What happens if you mutate a contract that's currently running? What happens if you delete a contract that is running, then call another contract that calls back into the first contract; which contract runs the second time? These things need to be well defined. So we should start from the fact that we can store code as data but you can't mutate it, and then figure out how to mutate it. We don't have answers to those questions yet, or at least I haven't heard answers to those kinds of questions yet. No, I agree, we don't. We did, however, have in [CAP-47](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md) a function for writing a contract code entry, so whatever that host function's semantics are, they're the same semantics here. Well, the thing is, it's like what John is saying: currently, when you write a ledger entry, this is observable by anything after, + +[10:00] like immediately after, whereas here I think we have to decide, and we probably cannot make this... actually, this cannot be true, I think, because this is probably going to be scoped to, tied up to, the lifetime of the wasm runtime that's actually executing the code.
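A sketch of the merged representation being described, with illustrative names rather than the CAP's actual XDR: every contract-related entry is keyed by (contract ID, sub-key), and one reserved sub-key designates the contract's own wasm code:

```rust
/// Sketch of the merged ledger-entry model discussed above. Names are
/// illustrative, not the CAP's XDR definitions.
enum ContractEntryKey {
    /// Reserved "magic" sub-key: the owning contract's wasm code.
    WasmCode,
    /// Ordinary contract data, keyed by an arbitrary value.
    Data(Vec<u8>),
}

struct ContractEntry {
    contract_id: [u8; 32],
    key: ContractEntryKey,
    /// For `WasmCode` this holds the wasm binary; otherwise user data.
    value: Vec<u8>,
}
```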
So if you have a call-yourself type of thing, we're certainly not going to hot-patch the running one. Exactly, yeah. So this is a place where this actually deviates from what we do for data, so we have to really specify this and think about it very hard. That's exactly what I was saying, Nico, you just said it. Yeah, no, I know; it's kind of scary in a way, in that I don't know what the right answer is. You see, I don't actually think this is particularly scary. I think there are really only two possible options, and they're both fine. One of them is you do + +[11:00] what Unix does, which is that the executing process is essentially disjoint as soon as it starts executing, and so if you rewrite the file, that's fine: the next exec of it will get the new code. But, so, did we actually decide on reentrancy, for example, like what you can or cannot do? Well, actually, you get EBUSY in Unix. No, you can write a file while it's running. You get EBUSY... On Windows the file's locked while it's executing; on Unix you can rewrite the file, as far as I know, because I do upgrades of running programs all the time and it works. If I upgrade Chrome while it's running, if I do apt-get install bash, it works. I'm telling you, I literally just tested this and I get EBUSY. I mean, this is kind of a + +[12:00] side note, but I think what you're doing when you upgrade is unlinking the file and creating a new file; you can't write to an inode that's currently executing. Okay, sure, unlink, you can re-link, that's fine. Well, unlink and create a different inode. My point is, it's totally doable in some contexts, and in other contexts you block it, and those are the only two options here, and I think they're both fine. There are other options. Okay, those are the only two reasonable options; the other one is, what, hot-patch your existing running program, which you're not going to do. No, there's at least one other option, which is: you can't update something while it's running, you have to delegate. That's blocked; that's what I just said, that's the other option, block or allow. What exactly do you mean by block? I guess it fails: you try to make the write, and the write fails. It's just a special case. We just don't know. So writes to the running code fail because we decided so. But then how do you actually do the update? I guess you're just saying you can't mutate your own, but you can mutate someone else's. Okay, but that needs a lot of specification in and of itself, + +[13:00] and I was providing an example of how you can do that. I can't just modify your contract, right? That's obviously not acceptable. But we have manage-contract transactions, right? That's the out-of-band technique. But that only works in a very tight sense. What if you're a DAO that's managing a smart contract? You'd like to be able to manage it from smart, right? Okay, then there's a whole bunch of space here: sure, maybe you have two high-level options, allow or block, but block has many sub-options. Okay, I don't see how this relates to the question, which is: do we store this as a ledger entry or not? I agree that individual host functions need to have semantics defined for them. But okay, I guess what I'm getting at is, I would accept this change if we make it so that you can't mutate the code
while it's running, right now, and we can figure it out later. Okay. + +[14:00] But if you're going to allow it right now, then we need to do a lot more work, and it's not a good idea. Sorry: if we allow writes to the currently running contract, we need to do more work. Okay. So what if I specify that contract code is currently immutable, even if it's stored as data? No, the currently running contract is immutable, but, as I just said, we have no mechanism to write to a not-running contract, or not a good mechanism; we should just make it immutable. Okay, sure. What I'm saying is, let's separate the mutability problem from the representation problem. Okay, then we can make progress, because I don't want to approve this in the mutable case if we don't have a good story about mutability, and we don't right now. Gotcha. Okay, so only the read path works for this key right now, and the write path just fails. + +[15:00] I have a question, not about the mutability but about the representation. If we're making it a ledger key that you have to write, and in the future we do decide to support mutability, will it mean that the only way to update the contract is to write the entire binary every time? I'm just wondering: if we support mutability and somebody wants to write a contract where they swap back and forth between multiple implementations, like rolling back or something like that, there's the cost of writing an entire contract, which might be a couple of kilobytes, versus, if this is just data, storing this data under regular data keys and then having a pointer into that data. + +[16:00] Does that make any sense? Yeah, it gets into what we talked about a little bit in yet another one of the threads this week, which is: is there any kind of delegation mechanism? I think it's a little bit awkward, because, first of all, we'd have to actually come up with a delegation mechanism that we can all agree on, which is going to be weeks of conversation, but also you wind up needing to pre-flight every single transaction in that case, because you need to resolve the current delegate. Well, maybe you don't need to pre-flight them if you can guess where the delegate is currently pointing. But anyway, it sounds like it increases the complexity of this, and I'm not interested in increasing the complexity. I'm just curious whether we're future-limiting something we might want to do. I don't think so, because again, + +[17:00] [CAP-47](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md) already has a function called write-contract, and it takes a binary. The point here is to just absorb the two representation questions, not necessarily to settle whether we have proxying or delegation or whatever, which we might have. But when we had that conversation this week, John was really adamant about, let's ship without it and see what happens, and I can live with that, because it's true: we could make the wrong delegation mechanism and then we'd be stuck supporting it forever. So, makes sense? Zoomed in and focused on the representation question, nobody has any real objections? Okay, I think it's really not contentious. Okay.
So the final thing I had on the + +[18:00] agenda is the footprint type, which is even less contentious: I'll just be extending [CAP-53](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md) with a new data type, which is called footprint, and it just contains two lists of ledger keys. The only really possibly contentious aspect of this, I think, is whether full ledger keys should be there, or whether they should be restricted to only contract data values, or value/contract-ID pairs or something like that, something a little bit tighter than a ledger key. But if we do that, it means that we are essentially fixing the impossibility of interacting with other ledger entries into the protocol going forward, and I think the interoperability question still leaves that open. So I was assuming that they would be full ledger keys; at least, in conversation with John, he again fairly clearly suggested that he would prefer to keep that door open, and I'm happy to go with that. So, does anyone feel strongly? What do you mean by footprint here? Sorry, the footprint is a term that + +[19:00] was introduced in the other CAP, the data one; it's what we have been calling, up until recently, the read/write set. I found that the paper that introduces this concept of deterministic execution actually uses the word footprint, and I think footprint is a great word to use here, so I've decided to start using it, because otherwise you wind up with "the read-only part of the read/write set," and honestly, linguistically, it's a little bit clumsy. Nobody cares about the footprint? Okay, cool, I'll just make it a ledger key type. That is the end of my agenda items; I yield the floor to discussion of interoperability, or whatever else you want to talk about.
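A minimal sketch of the footprint type as just described: two lists of ledger keys declaring everything a transaction will touch, which is what makes execution deterministic and lets transactions with disjoint write sets run in parallel. `LedgerKey` here is a stand-in for the full XDR key type.

```rust
/// Stand-in for the full XDR ledger key type.
type LedgerKey = Vec<u8>;

/// Sketch of the `footprint` data type: the keys a transaction reads,
/// and the keys it may also write.
struct Footprint {
    read_only: Vec<LedgerKey>,
    read_write: Vec<LedgerKey>,
}
```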
Thank you. John, is this a discussion that you want to kick off? Have questions... + +[20:00] Are we switching now onto the asset interop stuff, is that where we are? Yeah, I think we've sort of gone through all of the existing stuff on the agenda, and the only thing that was a big question mark was the asset interoperability stuff, so I think that's where we are at this point. Yeah, I mean, I think the only real thing to talk about here, which we have not been able to come to agreement on, is just: what are even the requirements, what are we trying to achieve? Me, Nico, Tomer, Leigh, we talk about this problem, I don't know, three times a week right now, and I don't even think we're all talking about the same thing. So I think we just need to talk about our feelings. So, anybody can take it from there. Yeah, I love talking about my feelings, so I'll start. So, the basic + +[21:00] requirement for asset interop, in my opinion... let's maybe think about the full spectrum of interop, about the edges and where we are in the middle. On one extreme of interop, we just do nothing, and for this you can look, for example, at Aurora on NEAR, which is a whole blockchain running inside of a blockchain that has no interop whatsoever: if you want to interop with the parent chain, you need to go through various bridging solutions. So that's a very extreme version, which is no interop at all, and it basically means that we're not taking advantage of the existing Stellar ecosystem at all. But it's extremely simple; we just don't need to think about legacy interop at all. On the other extreme side of interop, you have full interop with every primitive on the current Stellar network, so you + +[22:00] can do things like interop between smart contracts and AMMs and the orderbook and sponsorships, and everything else that Stellar provides. That's great for supporting legacy, but it's also terrible, because it means we're bringing a lot of the technical debt and the idiosyncrasies of the classic protocol to the new world. So I think we need to find somewhere in between, and I would say that even though there are a lot of disagreements in this room, the basic thing that we agree on is that assets are the main point of interoperability: we're not trying to bring the orderbook, we're not trying to bring AMMs, we just want to make sure that classic assets will operate in smart-land, and that asset issuers still have their infrastructure intact and don't need to make changes, + +[23:00] and that service providers like Fireblocks and BitGo, all these folks, have their services still viable and working even in this world, without a change. Does that make sense to people? Maybe I'll pick it up, because the "without a change" part is kind of loaded, I would say. There are basically two parts to it. There is: do I expect, on day one, as soon as we have smart contract capabilities, that I can use any classic asset on the network kind of + +[24:00] automatically from smart, or not? And that, to me, is one version of that extreme inside this box that you described, where no change is needed. It seems to me that if we try to make it that extreme, we are really over-constraining what type of things you can do in smart in terms of those assets, because you expect those assets to be represented by trustlines, with all the things that come with that, and you have the semantics that are really specific to classic. + +[25:00] So there is that version. I think there is maybe a more nuanced version of this, which is: what do you mean when you say no change? Does it mean that you want people that issue tokens on Stellar to be able to continue, that we don't break their compliance story or whatever they have, in terms of what they signed up for with the network? We're expanding the set of capabilities, and this one sounds more to me like there's some room there, where you can say, well, maybe people who issue assets in this context of classic assets need to + +[26:00] kind of opt into smart capabilities, and maybe they have a way to specify which subset of the capabilities they are interested in, and when you do that, then you maybe have a different way to represent those assets. Kind of like when we introduced claimable balances: claimable balances are actually a good example of where we introduced a way to go, in a way, that was actually breaking a little bit in terms of compliance.
But at the same time, we tried to make it so that, for tokens that don't have a lot of restrictions, claimable balances can kind of flow freely. And to me, trying to do a similar thing with smart + +[27:00] would probably be a good middle ground. But that's an opt-in, so it means you don't automatically get every classic asset showing up on day one. Yeah. So I think claimable balances are a really good example: they're a fairly novel concept, and for the most part, aside from a small set of super-specific Stellar wallets, we haven't even seen support for them among major service providers. So when I'm talking about minimal change, I think it's worth dividing this into the different stakeholders that we're talking about. On one end you have the more institutional cross-chain services: things like exchanges, things like Fireblocks and BitGo, + +[28:00] even Circle as an issuer. These folks take the path of least resistance; it's been difficult to get even muxed accounts implemented with these folks. So I would say that from their perspective, zero change is preferable. Now, for the actual touch points with these smart contracts, obviously it's okay to introduce change: if a wallet wants to interact with some crypto primitive, some smart contract, then obviously they need to introduce changes, and that's okay. So on the wallet side itself, and from the perspective of a user that's trying to use the smart side, it's definitely okay to introduce some changes. Can I actually ask a question on exchanges? When you say exchanges don't need to... well, most exchanges today don't support any Stellar assets other than lumens, + +[29:00] so why is that relevant in this conversation? It's relevant because of USDC; it's relevant because there is a group of exchanges that support USDC, right? And it's growing. But over time, wouldn't you expect, if there are any good assets being issued on the smart side, that they would be supported by those exchanges? For an exchange it's a bit of a different story, right? They have existing things, they're not issuing tokens; it's more like they have a wallet in classic, and you want their wallet to continue to work. And I think, at some point, if they decide that smart assets are interesting and they want to opt in to make the changes, great, but they're probably not going to be the first people to do it; it's probably going to take them a while. And it sounds like we just don't want to break them. Yeah, that's fine, but I don't think + +[30:00] any of the proposals so far are saying we would break classic wallets. No, but we are saying, for example, that if we're going to recommend issuing on smart going forward, then the issuer has this tough decision, where they need to decide whether to issue on the old path, which maybe is not recommended anymore
but is what the exchanges actually know how to support, or to issue on the smart side, where, based on our previous experience, it's going to take a whole lot of time for service providers to start enabling something new. I get that. I'm just a bit skeptical about exchanges in particular, that they won't... Basically, I think + +[31:00] there are two types of exchanges: the exchanges that are going to support whatever the latest set of functionality is, they are going to keep up, maybe with some delay, and then you have others that are very conservative and are going to be slow. And I think that's why, for example, we still don't have Stellar USDC on Coinbase. Coinbase, as far as I know, doesn't have any multi-chain assets right now because of extensive technical and product debt; Coinbase is a bit of a special case, because they don't support any non-ERC20 USDC. But it's a pickle, it's difficult, because with exchanges there's a bit of a chicken and egg: they want to see demand, and it's hard to create that demand without these assets + +[32:00] being supported on exchanges. But I think the main thing I'm trying to get at here is that it's hard to get exchanges to make changes, and the service providers as well, folks like BitGo and Fireblocks. And right now, the way that you issue assets on Stellar, the actual distribution, is just regular payments, so you can do it on all these platforms. If you introduce a new way to do it, then you basically tell issuers, you can't use these service providers. I mean, that's not really what you're telling them; it's, you can't use those service providers if you want to do it the new way, right? So what you're suggesting is having this + +[33:00] split thing in the ecosystem, where you say, hey, you can issue an asset in one of two ways: this way will give you a shorter path to exchanges and you can work with various service providers, and the other path will give you... what, John? I don't know what it'll give you: the power to do whatever you want. The question is, what do you want? If they don't want those things, you probably shouldn't do it, right? Yeah, if you think of what we have right now, the network is kind of optimized for payments. If the token that you're issuing is not meant to be used for directly supporting payments, then there's no reason to issue it + +[34:00] as a classic asset. Think of NFTs or all sorts of random things like that. Yeah, so as I said, if you issue on classic, then you have the entire breadth of classic tooling available at your disposal right now: every wallet, every exchange, everything that supports classic. So yes, it's about payments, but it's also just about the ecosystem support. I think payments are also just transfers, so it's also not clear to me why we would even say that NFTs don't care about payments, because people do care about transferring NFTs. Yeah, but we're not going to have that; it's actually kind of broken
if you model it like that. That's kind of what we see today on the network: the way you model NFTs on the network is extremely poor, the experience that you get. + +[35:00] I guess one question is: do we think the value of the smart contracts is going to come from people implementing new assets, or do we think the value is going to come from taking high-quality assets that are issued by people who are not particularly experimental, and then innovators are going to come and make new use of those assets? My suspicion is that maybe the latter is better, and so it's more important to interoperate well with existing assets that might be issued by somebody else, and to be able to program those assets, than to be able to create some new ecosystem that's more divorced from the existing one. I think it's probably both, David, because you're going to have + +[36:00] the stablecoins, USDC and such, that are going to be used heavily in these smart contracts, but you also are going to have things like governance tokens, Uni-style tokens, and these are going to be issued on the smart side. We might say, hey, you know what, we don't actually care about these assets being transferable as regular payments on the classic side, and maybe that helps with implementation. But we will see these assets. But, I mean, put another way: when I talk to people who are using Stellar, it's often two things that draw them to Stellar, sort of the perception of high-quality assets, and low transaction fees. And so if those are the strengths, then we want to make sure that we don't sacrifice those strengths and create a completely new ecosystem. We want to be able + +[37:00] to, maybe for higher transaction fees, add more flexibility, but where you can still do things with low transaction fees and do things with existing legacy assets. That seems to me like the thing we want to optimize for. Of course it's going to be, whatever, Turing-complete in general, but the thing we want to optimize for, that's going to make this special, is the ability to also leverage these high-quality assets and low transaction fees for people who are just doing simple payments. And that's not going to work for everything, but there are probably a lot of cases where people are going to want to use USDC, for example, and maybe we don't want to get Circle to write some whole new smart contract to implement this; we just want, somehow, unilaterally, people to be able to write contracts that do things with USDC. + +[38:00] Yeah, I think we're getting into a lot of speculation and somewhat religious discussions here. John, can you help us fine-tune where things get hairy in terms of interop, and what kinds of decisions we can make to simplify that? I mean, there are a couple of axes of decisions. One axis is wrapping versus not wrapping: wrapping generally makes everything easier implementation-wise, significantly so, but the UX might or might not be better, that's up to your interpretation, if you don't have a wrapping interface. So that's one axis. A second axis is: should a classic asset,
when used from the smart perspective, look exactly like a smart asset used from the smart perspective? Should they
+
+[39:00] look identical, should they behave identically? And the third main axis is: should you be able to take a smart asset and easily send it back to the classic side? That axis is more speculative, I think. But those are the three angles that one might look at this problem from. One thing that is noticeable in other ecosystems is that there's definitely a canonical way of doing assets. In the Ethereum ecosystem, even though you can write your own contract, everyone just copy-pastes, or imports,
+
+[40:00] the OpenZeppelin one; Solana has the SPL; other ecosystems have their baked-in contracts for assets. People don't actually innovate all that much with assets. And I guess my question to you is: can we make those canonical assets on Stellar be the existing assets? Maybe. I think you'd be making a lot of sacrifices to do so. Are they sacrifices worth making? Not to me. What are the sacrifices? I think the biggest sacrifice is just: do we really want to have a 64-bit balance for everything?
+
+[41:00] The next sacrifice is: do you want it to literally be exactly what exists today, or do you want to build on top of that? Because to do the stuff that's common in DeFi, you would need an allowances system on top of that anyway, so it's not what we have right now. We have all this compliance stuff baked in, and it's pretty unwieldy for a variety of reasons; do we want to be married to that for the rest of eternity? I don't. So I guess what I'm getting at is: you can shoehorn anything into anything. I don't have a really strong opinion about the "should you" question, but I do have...
+
+[42:00] I participated briefly the last time around, and since you're already doing requirements gathering here, I just want to add a somewhat narrow version of what feel like the requirements I would want to add to this question. I don't actually have very strong opinions about the assets; I do have fairly strong opinions about two minor points, and I think there's a wide variety of ways to achieve these. But if we're writing things down: if possible, I would like to request that users not be in charge of nonce management unless they really want to be. If we're in a situation where the user has to figure out how to operate a cryptography API safely and correctly themselves, we're putting them in a very dangerous position, and whether we accomplish that by completely baking in a standardized path, or just having a very-easy-to-delegate standardized path, or even just a host function with a very simple signature that's fairly impossible to misuse, I'm kind of okay
+
+[43:00] with most of those approaches. But cryptography APIs become error-prone really quickly, and I don't want to surface a lot of that to users unless they ask for it, unless they're going out of their way to say "I personally want to do some fancy cryptography." For the average person who's just creating an asset, I really want them to not be forced to copy-paste, and possibly get wrong, the use of a cryptography API. That's requirement-slash-desire number one. And requirement-slash-desire number two is: ideally, if that code is going to be in every single contract and it's the only thing that differs from one contract to another, or if it's a
standardized preamble in every single contract on every single path, it would be nice, just from a code size and execution performance perspective, to factor it out as well. I know the authorization point is not the only part of interop, but to me it's the only part that I actually care about; I do not care about the other aspects.
+
+[44:00] Right. So I think what you just described is kind of what is touched on in [CAP-52](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0052.md). The authorization model for payments, though, going back to that, is different, and that's why things are done a certain way in [CAP-52](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0052.md). But I do agree that, as we try to figure out the actual interop story with classic, we can avoid... I mean, having a good, solid base implementation that people can just import into their thing, and then it mostly just works, without having to implement those things. But the key part of that, the absolute most
+
+[45:00] important part of that from my perspective, is the authorization aspect of it. Whether you allow or require people to fiddle with other parts doesn't matter quite so much, but users writing their own authorization code is just a disaster. We have crickets; can somebody... well, I do have a question. So, returning to what Tomer sort of started this thread with, which was
+
+[46:00] just that other networks have a canonical way, a canonical representation, of assets that people just use. His question was: can the current, or classic, asset just be that canonical representation? And he said he's not a fan of that approach. The question is: is it worth trying to think about other models that we could use for the canonical representation of an asset, or is it premature to get into that right now? I mean, in a way, in [CAP-49](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md), for example, the wrapped asset is a canonical implementation of the classic asset, but it's actually something that is modeled as a smart asset. So
+
+[47:00] it's not like people would have to reinvent a bunch of things there; you can actually standardize on exactly that thing. And that's actually what is interesting about this approach: the way you do it is you write a standard, an ERC-20 type of equivalent; you design it thinking about the semantics as smart semantics first, and then you make it interop with classic assets, and there's only one way to do it. So you do have standardization happening in that world. Getting back to that, and I feel like I'm the only one talking, it seems to me that standardization is not necessarily the
+
+[48:00] sticking point, because in all those proposals so far there is actually a standard that includes classic assets. So what else is missing? Got it. Well, are there other questions that people want to bring up now, in the last nine minutes, that would help move the conversation forward, or should we just call it? I mean, we got through a lot today. I'll let people think for a minute.
+
+[49:00] Okay, I think that's a wrap.
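+
+As a concrete illustration of the "ERC-20 type of equivalent" standard floated around [47:00], a smart-first token interface might look roughly like the following. This is a hypothetical sketch; none of these names come from any CAP:
+
+```ts
+// Hypothetical smart-first token interface: designed with smart semantics
+// first, then backed by (or wrapping) a classic asset. Illustrative only.
+interface StandardToken {
+  balanceOf(holder: string): bigint;
+  transfer(to: string, amount: bigint): void;
+  // The allowances system noted at [41:00] as a DeFi prerequisite.
+  approve(spender: string, amount: bigint): void;
+  allowance(owner: string, spender: string): bigint;
+  transferFrom(from: string, to: string, amount: bigint): void;
+}
+```
+
+A wrapped classic asset would implement the same interface as a purely smart asset, which is what makes a single canonical standard possible in this model.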
+And so, thanks again, everybody, for joining the discussion. Anyone who's watching: obviously we're going to post this, to archive it on YouTube later, so if you want to re-watch this, which, I mean, who doesn't, I watch these things six or seven times, you can watch it there on YouTube. And if you want to participate in the discussion, or you want to follow along, please make sure to read the CAPs, please join the Jump Cannon channels here in the Stellar Discord, and also sign up for the Stellar dev mailing list, where a lot of these discussions will continue asynchronously. We will see you back here next week for another Open Protocol Meeting. Thanks, everybody.
+
+
diff --git a/meetings/2022-06-23.mdx b/meetings/2022-06-23.mdx
new file mode 100644
index 0000000000..a97acb9b03
--- /dev/null
+++ b/meetings/2022-06-23.mdx
@@ -0,0 +1,200 @@
+---
+title: "Soroban Fee Model and Event Filtering"
+description: "An Open Protocol Discussion covering Soroban contract events, Horizon ingestion concerns, and the emerging multi-dimensional fee model in CAP-46-7, including storage rent and state expiration trade-offs."
+authors:
+  - david-mazieres
+  - geoff-ramseyer
+  - graydon-hoare
+  - jay-geng
+  - jonathan-jove
+  - justin-rice
+  - leigh-mcculloch
+  - nicolas-barry
+  - siddharth-suresh
+  - tomer-weller
+tags:
+  - legacy
+  - CAP-46-1
+  - CAP-46-3
+  - CAP-46-7
+---
+
+import YouTube from "@site/src/components/YouTube";
+
+
+
+This discussion continues Project Jump Cannon work with a focus on two major areas: how Soroban smart contracts emit and expose events, and how fees should be structured to fairly price computation, ledger access, and long-term storage. The session explores both developer ergonomics and validator-level constraints, emphasizing future scalability and predictable performance.
+
+Much of the discussion surfaces unresolved tensions: how much event data clients should be forced to ingest, whether protocol-level filtering is necessary, and how far the fee model should go in separating resource markets. The latter half dives deeply into CAP-55 (now CAP-46-7), especially the controversial idea of storage rent and state expiration as a way to prevent unbounded ledger growth.
+
+### Key Topics
+
+- Contract events and logging (CAP-51 extension, now CAP-46-3):
+  - Adding a formal logging primitive for contracts.
+  - Hashing logs and transaction results into transaction meta for verifiability.
+  - Proposal for error-only logs to reduce noise for wallets and indexers.
+- Event filtering and indexing:
+  - Horizon's role in serving contract events.
+  - Trade-offs between ingesting full transaction meta vs. protocol-level filtering.
+  - Discussion of Bloom filters, RPC-level subscriptions, and light-client needs.
+  - Open questions around proving non-occurrence of events.
+- CAP-46-7 fee model overview:
+  - Separating fees into compute (execution), ledger reads/writes (I/O), and non-market "commodity" resources (e.g., meta size).
+  - Motivation for multi-dimensional fees to reflect real hardware bottlenecks.
+  - Concerns about validator transaction selection becoming a multi-dimensional optimization problem.
+- Wallet and UX implications:
+  - Reliance on preflight simulation to estimate minimum fees.
+  - How wallets might present complex fee structures as a single user-facing cost (see the sketch after this list).
+  - Risks of overbidding when resource prices are aggregated.
+- Storage pricing and state expiration:
+  - Modeling storage cost based on bucket list growth.
+  - Introducing rent-like renewal to keep ledger storage priced at market rates.
+  - State expiration as a mechanism to prevent free, permanent storage.
+  - Concerns about balances or financial state being deleted if not refreshed.
+  - Comparisons to alternative designs (archival proofs, infinite ledger growth).
+- Open design tensions:
+  - Safety vs. sustainability when expiring on-ledger financial state.
+  - Free-rider problems in shared contracts.
+  - Whether expiration semantics are acceptable for mainstream users and issuers.
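+
+A rough sketch of how a wallet might fold the fee dimensions above into one user-facing number, padding only the market-priced components. All names and figures here are invented for illustration, not taken from CAP-46-7:
+
+```ts
+// Hypothetical preflight result: two market-priced dimensions plus the
+// deterministic "commodity" bucket described in the discussion.
+interface PreflightEstimate {
+  computeFee: number;   // execution ("gas"), bid-able
+  ledgerIoFee: number;  // ledger reads/writes, bid-able
+  commodityFee: number; // meta size, archive bytes, etc.; deterministic
+}
+
+// Show a single cost, overbidding only where there is a market.
+function userFacingFee(est: PreflightEstimate, margin = 0.1): number {
+  const bid = (f: number) => f * (1 + margin);
+  return bid(est.computeFee) + bid(est.ledgerIoFee) + est.commodityFee;
+}
+
+console.log(userFacingFee({ computeFee: 100, ledgerIoFee: 40, commodityFee: 25 })); // 179
+```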
+
+### Resources
+
+- [CAP-0046-01: WebAssembly Smart Contract Runtime Environment](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046.md)
+- [CAP-0046-03: Smart Contract Host Functions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-03.md)
+- [CAP-0046-07: Soroban Fee Model](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-07.md)
+- [Project Jump Cannon: Choosing WASM](https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm)
+
+Video Transcript
+
+[00:00] So welcome to the Stellar Open Protocol Discussion. In these discussions we discuss and plan for changes to upcoming versions of the Stellar protocol. Right now we're focused on Project Jump Cannon, which will bring smart contracts to Stellar. In addition to changes to the Stellar protocol, it will also lead to the creation of a new smart contracts platform, and all of the discussion that we're having about this is being tracked, and you can participate in it. Much of it is happening here on Discord, in the Jump Cannon channel and the Jump Cannon dev channel. There is also a series of Core Advancement Proposals, or CAPs, that relate to changes that would enable Jump Cannon, the new smart contracts platform: CAPs 46 through 55, I believe, at this point. They're pretty modular, so each one takes on an aspect of the changes that need to be made in order to bring smart contracts to Stellar, and we are working through those modules bit by bit, discussing the segments necessary and the changes necessary to allow those segments to actually come to life
+
+[01:00] at some point. All of the work that we do will go through the normal process. In other words, CAPs are put up in the GitHub repository that's linked in the show notes; they're discussed here, they're discussed on a mailing list, and they're discussed in Discord. After they reach a point where they are stable, they move from being a draft into a formal acceptance period; finally they're accepted and implemented in a version of the Stellar protocol, and before that version of the Stellar protocol goes live, validators actually vote to accept it. Now, we've actually made a lot of progress on the Jump Cannon trajectory, but as of yet the CAPs that we have in front of us have not been accepted. There are still a lot of questions, and today we will dig into some of those questions. If you who are listening have questions, you can leave them as text in the live chat channel; I'll try to keep an eye on that. We're certainly trying to move this discussion forward and have the substantive issues come to light,
+
+[02:00] so we may not be able to answer all the live chat questions, but we definitely will later if we can't get to them in the course of this meeting. So I think everyone is here and I think we are ready to kick off. Today, I know that recently there was a new [CAP-55](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-07.md) fee model for smart contracts; I don't know if it just hit the mailing list yesterday, and there were a few comments that came in. I guess I'm going to start by asking you, Nico, if we're ready to discuss that, if that's where we should start. Maybe, yeah, we can just go over a quick overview of what's going on in that CAP, and then we don't have to go into details; I'm not sure we need a lot of pre-reading as part of this.
+
+[03:00] Is that a good use of people's time? Yeah, I think that sounds good. Also, Nico, obviously a lot of these things are "normal", quote unquote, in the world of crypto, but some of these are a bit more contentious, so I would emphasize specifically the contentious bits, so that we can have a good old-fashioned argument. All right, let's see. Yeah, I mean:
so do we want to start with this CAP? I think there was also the events one that Siddharth opened, which is maybe a little more scoped, I don't know. Yeah, I'd argue we start with Siddharth's, because it's less contentious, and we'll probably end up arguing about this one. Okay, great, so we'll start there and we'll end up at [CAP-55](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-07.md). Siddharth, do you want to... yeah. I don't know if everyone had time to read the CAP document; I actually made a fix to it this morning,
+
+[04:00] but I can give a quick overview of it and we can discuss after that. So I added this change onto [CAP-51](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-03.md), which is the host functions CAP: I added the ability for contracts to log data. So I added contract logs to the transaction meta, and as part of this change we also moved the transaction result into the meta as well. Both contract logs and transaction results are hashed, and a hash of these hashes is stored in the transaction result pair; this is how you would cryptographically verify them. And one change I haven't made to the document yet, but that we talked about yesterday, is that we're going to add another contract log type where the logs are only emitted if there's an error. So that's a very high level overview. Are there any questions?
+
+[05:00] I guess those logs are the equivalent of events in Ethereum? Yeah. So the way it would work is that contracts log whatever they want, it gets sent to core, which writes it to the transaction meta, and Horizon can serve it up in any way you want. So I would imagine that if you want to listen to a specific event, Horizon would provide that ability, allowing you to write applications that hook on to specific events. One thing that I want to point out here is that it basically means that, in order to sift through data coming in from Horizon as an ingestor, you need to basically read everything,
+
+[06:00] which may become a lot as the network grows in capacity. Ethereum has this concept of a Bloom filter that's included in every ledger header, so that you can get a strong indicator of whether or not the smart contract that you're interested in, or the account that you're interested in, is actually included or has emitted events in that specific block. Should we consider doing something similar? Yeah, I can't think off the top of my head why we wouldn't optimize that area; I can look into that. I don't think I see anything wrong with that. Well, actually, I think it's maybe a premature-optimization type of situation;
+
+[07:00] there are probably better ways to do it than this kind of arbitrary Bloom filter thing. I can imagine, for example, since we already have the meta as a stream, you could have Horizon tell core which filters it wants to apply, instead of doing it after the fact, and then the meta it receives would be a subset of the full meta. Maybe you're not interested in ledger changes, say, or maybe you're not interested in classic transactions, all those things. Well, you're assuming a Horizon here, and I think that's part of it; well, there's always a consumer, right?
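+
+The per-ledger filter described at [06:00] can be pictured with a toy Bloom filter over event emitters. This is only a sketch of the general technique, not anything specified for Stellar; all parameters and IDs below are arbitrary:
+
+```ts
+import { createHash } from "node:crypto";
+
+// Toy Bloom filter over contract IDs, in the style of Ethereum's
+// per-block log bloom. Sizes and hash count chosen arbitrarily.
+class BloomFilter {
+  private bits: Uint8Array;
+  constructor(private sizeBits = 2048, private hashes = 3) {
+    this.bits = new Uint8Array(sizeBits / 8);
+  }
+  private positions(item: string): number[] {
+    const out: number[] = [];
+    for (let i = 0; i < this.hashes; i++) {
+      const h = createHash("sha256").update(`${i}:${item}`).digest();
+      out.push(h.readUInt32BE(0) % this.sizeBits);
+    }
+    return out;
+  }
+  add(item: string): void {
+    for (const p of this.positions(item)) this.bits[p >> 3] |= 1 << (p & 7);
+  }
+  // False positives are possible; false negatives are not.
+  mightContain(item: string): boolean {
+    return this.positions(item).every((p) => (this.bits[p >> 3] & (1 << (p & 7))) !== 0);
+  }
+}
+
+const ledgerFilter = new BloomFilter();
+ledgerFilter.add("CCONTRACT_A"); // recorded when the contract emits an event
+console.log(ledgerFilter.mightContain("CCONTRACT_A")); // true
+console.log(ledgerFilter.mightContain("CCONTRACT_B")); // almost certainly false
+```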
+But if you want something that's an application-specific consumer, which you do see in other ecosystems quite a bit... if I'm developing a dApp and I want to have a
+
+[08:00] stream coming in of my specific information, why should I run a full-blown Horizon rather than some sort of light client that could just share the logs that are specifically relevant to me? And that's, again, a subscriber; I don't see why this is part of this CAP. Well, I'm not saying it's part of this CAP, but I am saying: is there a place in the protocol to optimize for these use cases? Because we actually want more people to run nodes, and what we're doing here right now is really tying them down to the Horizon model, which is: consume everything, ingest everything. No, Horizon doesn't force you into consuming everything. You're getting the entire meta
+
+[09:00] and then you filter it. Yes, but is the amount of meta being produced going to be a bottleneck, even in the medium term? I don't think so. XDR is fairly efficient; I would like to see the actual performance problems before picking an arbitrary type of filtering technology, because I don't know what the use cases are. You're saying they are events? Yeah, sure, then you just keep events; it's actually a very small subset of the meta stream. Is anything that we're adding here in this CAP preventing us from adding a Bloom filter or other sorts of strategies in the future? I don't think so; you can do any kind of filtering. I mean, I think the place where I can see an actual bottleneck in the future is that the
+
+[10:00] actual size of the meta may get too large if you want to run a lighter node. But then we are getting into custom logic in core to filter that somehow, and I think the best way to do it is actually when you're producing it, instead of trying to do it after the fact with the Bloom filters. Something that I don't see here, when it comes to filtering, is any way to filter beyond the contract. Presumably filtering by the contract is there, because that would be in the transaction meta. But if a contract wants to emit a whole lot of different logs,
+
+[11:00] how would an application filter on those specifically, or is that just too granular, too micro? Well, at the moment the body is an ScVal, so if you wanted to do that, you would add the filtering in there. But I think we could discuss this; maybe that's not reasonable and we should add higher-level filters above the ScVal, something we can consider. Yeah, a structure like that would make sense here. Originally I thought we would do something like... right now you have this log type, right, system or contract info, and for both of them you probably want to have an actual event name, right, which is like a short symbol of sorts.
+
+[12:00] Yeah, I think that makes sense. I would honestly look at what the subscription patterns you see in other smart contracting platforms are, because they have explored this space fairly extensively, and I think it's what a lot of the SDKs really lean on. If you're writing a dApp, it's fairly common for it to latch onto a bunch of subscriptions.
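+
+A dApp-side subscription along the lines discussed at [11:00] might filter on a contract ID plus a short event-name symbol. The shapes below are hypothetical, not from any CAP:
+
+```ts
+// Hypothetical event shape: "log type + event name + ScVal body".
+type ContractEvent = {
+  kind: "system" | "contract";
+  contractId: string;
+  name: string; // short symbol, e.g. "transfer"
+  body: unknown; // an ScVal in the real system
+};
+
+// A client subscribes by predicate rather than ingesting everything.
+function matches(ev: ContractEvent, sub: { contractId: string; name?: string }): boolean {
+  return ev.contractId === sub.contractId && (sub.name === undefined || ev.name === sub.name);
+}
+
+const sub = { contractId: "CABC123", name: "transfer" };
+const ev: ContractEvent = { kind: "contract", contractId: "CABC123", name: "transfer", body: { amount: "10" } };
+console.log(matches(ev, sub)); // true
+```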
+So, however they're normally doing it, we kind of want to support those patterns. Is there more? Anyone else have thoughts, questions, suggestions? Is it sort of clear what the next move is for you here,
+
+[13:00] Siddharth? Can we move on to the fee CAP? Actually, there is something that I just thought about, related to what I think Graydon was asking in terms of use cases, and it's related to this filtering question: are there expectations, for example, that you want to have proofs of events that did not happen? Positive proofs are easy: with the proposal, you can basically prove that a given ledger had a specific event inside it, generated by a specific contract. What if you want to prove the negative, that is,
+
+[14:00] that a specific event was not emitted in a ledger? Is that the type of thing that people try to do in other systems? So yeah, that's a question for Siddharth, to look into what the use cases are. CAP-55, let's move on to it. All right, yeah. So CAP-55 is basically trying to layer fees on top of the various resource metering that started to get introduced in the system. A problem is that it's a little bit ahead of that, because we didn't actually
+
+[15:00] finish all of this. I think we have the beginning of gas metering, for example, in [CAP-46](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046.md), but there are other things that are not covered yet. So, standard disclaimer: this CAP has a bunch of open-ended things. Where the CAP is concerned, there are several aspects that are maybe more important to discuss. The first one is around the classification of resources, and having market dynamics based on those resource types. This is an area where, if you look at other blockchains, it's actually a mix of things.
+
+[16:00] Some systems historically started with just gas, like Ethereum. Gas, in this context, means the metric that allows you to count the cost of executing a transaction, and cost here is pretty loose in terms of definition: mostly computation, but when you touch a ledger entry you also pay for gas. So you have gas cost, which is basically this aggregate metric of multiple resource types, and
+
+[17:00] then, more recently in Ethereum, there have been discussions around other types of resources that are kind of interesting, such as bandwidth. So basically you have a funny crossroads there, which is: do I want to have a market for each of those resources, or do I want to generate a composite market for those? I think in Polkadot what they do is the aggregation with a polynomial function: basically you take all those resource types and you assign them a weight; actually, I think it's not even polynomial, it's linear,
+
+[18:00] and then you combine all those things and you get this synthetic... I don't remember what they call it, "weight", I think, over there. That's a way to compute this aggregate gas.
So, the challenge, talking about the challenges that come with those aggregate models, is that it's actually very hard to discover the price of things. An example: if you take a transaction that does a lot of I/O and very little compute, competing with a transaction that does very little I/O but a lot of compute, then with those aggregate functions, if you try to pay ten times more, for example, if you bid more
+
+[19:00] for one transaction, you don't know if you're signaling that your I/O is what you want to prioritize, or if it's your compute that you want to prioritize. So it basically causes the overall prices to have this uncertainty in terms of what you should bid; that's one of the problems with those aggregate metrics. With that said, with Jump Cannon, one of the things that we are doing is we have a very clean separation between the different resource types. I/O, for example: when we read or write the ledger, those operations are done basically outside of the main execution. You can think of it as: before applying a transaction, before
+
+[20:00] executing a contract, we load all the ledger entries that this contract needs, then it does its thing, and at the end it produces, potentially, side effects that will be applied as a post step. That's logically the way you can think about this. And the opportunity here for us is that, because we have those completely separate, and we also do it because of performance reasons, for parallelism, we can actually express those markets separately, and we can therefore have cheaper fees overall, because you can price things properly. You don't have to artificially inflate, for
+
+[21:00] example, the price of reading data from the ledger because compute happens to be expensive, which is what would happen with the aggregate model. So in the proposal, I actually have three categories, three brackets, for fees. One is for gas, which is the compute time; you can think of it as execution time in our model, really, because, like I said earlier, we have a full separation between I/O and execution. This one you can bid on. The second market that I have in the proposal is for reading and writing to the ledger. Here there is actually competition, because there are
+
+[22:00] constraints in terms of the bandwidth to the disk subsystem, both in reads and writes, so because of that you have to have a market for it. And it is separate, also, because there is an interesting fee model for writes that I'm going to talk about later. The third category is actually something that is not a market; these are dynamic fees for what I would consider commodities on the network, things like producing meta, or data that ends up being stored in archives. There's no reason to have real competition between transactions there;
+
+[23:00] instead we have limits per transaction. Basically, we say you can only produce, I don't know, 500k or something of meta for a transaction, and that's your limit, and then two transactions are actually not competing against each other, so there is no need for, and you cannot have, market dynamics there.
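+
+To make the contrast concrete, here is a rough sketch of the aggregate "weight" model described at [17:00] next to the three-category split just outlined. All weights, rates, and the meta limit are invented for illustration:
+
+```ts
+type Resources = { compute: number; ledgerIo: number; metaBytes: number };
+
+// Polkadot-style linear aggregation: one synthetic number, one market.
+// Bidding 10x more cannot say whether it is compute or I/O you want
+// prioritized, which is the price-discovery problem described above.
+function aggregateWeight(r: Resources): number {
+  return 1.0 * r.compute + 5.0 * r.ledgerIo + 0.01 * r.metaBytes;
+}
+
+// The split model: separate bids for the two market resources, plus a
+// deterministic "commodity" fee bounded by a per-transaction limit.
+const META_LIMIT_BYTES = 500_000; // "500k or something", per the discussion
+
+function splitFee(r: Resources, bids: { compute: number; ledgerIo: number }, metaRate: number): number {
+  if (r.metaBytes > META_LIMIT_BYTES) throw new Error("over per-tx meta limit");
+  return bids.compute * r.compute + bids.ledgerIo * r.ledgerIo + metaRate * r.metaBytes;
+}
+```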
+So there are actually a few of those resource types, and because there is no market, you can aggregate them; they end up in one big bucket of deterministic fees, based on the current state of the ledger plus the actual transaction. So those are the three categories that we have in this proposal.
+
+[24:00] Any questions at this point on this? I've been going back and forth, actually, on whether or not we should separate these. We could go with one market, like I said, but I think it pushes the price up quite a bit too much for the cheaper resource, whichever resource that will be, which is hard to predict. Do we anticipate that this will be difficult for users to reason about, to understand these different types of fees and to think about how to set them? So, yes and no. I think that's actually one of the things that's kind of interesting: we already have in the system a strong dependency on a preflight
+
+[25:00] mechanism. Before submitting most transactions to the network, they will have to go through a preflight endpoint; the preflight is the thing that basically allows people to compute gas, for example, to estimate gas for a transaction. In addition to that, we have certain fees that are more dynamic, because they are based on the current ledger. For example, a bunch of those things I was mentioning, like the price of storage in archives, are voted on, or determined, by the validators, so before submitting a transaction you have to know what those parameters are,
+
+[26:00] and as part of the preflight endpoint you basically get an estimate for the minimum fee for those categories. In the case of the non-market-based resources, the minimum fee is basically equal to, or very likely to be equal to, what you need. In the case where you do have markets, it's more like today, where you have to decide how much you want to overbid based on that, because the minimum fee doesn't necessarily translate to what the market is willing to pay; you have to look more at historical data. But this is something that an endpoint, like Horizon,
+
+[27:00] can expose. And having them tracked as separate resources in terms of historical price gives you actually something a little more stable, I would imagine, than if it was an aggregate. I think the complexity from multiple markets comes from... one of the implications is that when validators construct a transaction set, it's going to be a kind of multi-dimensional knapsack problem, which is not great. But that algorithm would not be part of the protocol; it's more like,
+
+[28:00] if you have five seconds to produce a block, there's only so much compute you can spend on assembling the perfect transaction set. Yeah, to answer the original question, Justin: at the end of the day, the wallets should have an easy way to present an estimated cost in XLM, a currency that users can understand, and they can tell the user, hey, this is how much more you can propose for that; they don't need to actually understand the mechanics of how this works. Yeah, it's true that,
when you overbid, for example you say "I want to spend 10 percent more than whatever happened in the last few ledgers," I imagine you can put that 10 percent pretty safely across the different resource types, at least the ones with
+
+[29:00] markets, because the assumption there is that they are priced accurately. But if you want to really save, it's hard to predict; if some of those, gas for example, become very expensive, maybe you don't want to be as aggressive on the other resource types. Can I jump in with a couple of comments? Yeah. So, one: at a high level, it's not clear to me what we gain from having a multi-dimensional optimization problem over the one-dimensional one, and the reason is
+
+[30:00] that, sorry, I haven't thought about this all that much, but the reason is that, at least in the current execution model, everything in one lane is going to execute sequentially, right? And so the main resource that's truly limited in a block is time, and unless there's some sort of weird interleaving going on between transaction executions, that's the one-dimensional resource that we have to optimize anyway. And so it's not clear to me why, in your example of a transaction that does lots of I/O versus one that does lots of compute... well, both of them are going to take a lot of time if they're using a lot of one resource. And so it's not clear to me that, at least in the current execution setup we have, we gain by allocating some, I don't know,
+
+[31:00] resource to I/O versus some to compute, as opposed to just looking at the whole picture of the total end-to-end time of the transaction. That said, it does seem like it'd be good to have some kind of price discovery mechanism for different resources, and certainly you want an overall limit, perhaps, on the total number of ledger entries. And so, thinking off the top of my head, I don't think it's incompatible to have a one-dimensional gas market and then price markets on each resource, in the sense that transactions could bid for the amount of resources they want to use and the fee per resource, and then you do some filtering step. But that's thinking very much off the top of my head; I don't think we necessarily have to go to the full multi-dimensional
+
+[32:00] optimization problem. But yeah, that's possible. It's just that, from historical experience, I/O is a huge problem, and trying to model it as time actually does a disservice, in a way, to the network, because something that's going to suck up your disk resources now stalls all your cores, right, in a parallel execution model. Because, like I said, we do all the I/O early on, and if you're actually maxing out your drive, then you're just stuck.
+
+[33:00] Right, I mean, it makes sense to have perhaps a limit on overall I/O, I guess. And then there's the other aspect, and I don't think we're necessarily going to close on this multi-dimensional thing now, but there's the other aspect of ledger size and writes, which is actually another key thing in there that makes I/O a little more special. Also, Nico, just to go back to Geoff's point: I understand why I/O needs to be priced significantly higher than the compute operations, but I don't necessarily understand why it needs to have its own market. So, the pricing that you have is the minimum price; it's not
Now but like there's the other aspect of yeah ledger size and rights, that is actually another kind of key thing in there, that I guess makes io a little more special also Nico just to go back to jeff's point like I understand, that why I o needs to be you know priced significantly higher than you know the compute operations. But I don't necessarily understand why it needs to have like its own market. So the pricing right, that you have is the minimum price it's not + +[34:00] the market price like. When you market prices is like in the ideal like what is describing the CAP is trying to be closer to like the ideal situation where you can actually construct a transaction set, that's going to basically be like right at the edge in terms of the capacity, that you have on your actual you know underlying hardware. So like cpu and io for disk. If we lose, that visibility. Then you may actually allocate too many transactions to compute. When then like you know you don't have like basically like the kind of natural way of having a + +[35:00] a transaction compete against other transactions, that are paying for expensive stuff, that's kind of what I'm getting to like. If you have like what was it like a good example would be like yeah I don't know, which one of those resources would be more expensive. But they are not going to be in the same order of magnitude let's say like a compute is the one more expensive at a given time. So so you have to pay like 10 times more right or 100 times more than the minimum fee for compute to get into the ledger. But your storage price is also kind of expensive and by bidding a hundred times you also bid a hundred times on storage and you're basically overshooting quite + +[36:00] a bit compared to the ideal model yeah I think the general point here is just, that you cannot, that in reality it's not the case, that there's just time. When a transaction is executing there are two different resources and there are different contention patterns on them and you can't trade one for the other the system does not actually trade one for the other like. If I for example submit you know a hundred transactions every one of, which is doing incredibly cpu and expensive stuff, that doesn't saturate the I o system and there's still no contention on the I o system whereas. If I submit a 100 transactions, that are just doing I o and they're doing no cpu, that doesn't saturate the cpu. So they are really two separate resources and the point where one of them gets a limit and can no longer do transaction processing it doesn't represent a limit on the other and vice versa. And so you can't trade + +[37:00] between the two of them from a market perspective sorry I'm not quite following something didn't we say earlier, that we were going to do like all of the sort of disk reads first. And then do the executions right so, that. If we have a lot of disk reads. Then we have less time for execution. And so the vice versa I sort of understand, that there's not they're not like directly tradable. But they seem correlated or anti-correlated well they're different devices. So like right I'm using the disk. And then I'm doing the cpu right I'm not using it at the same time I mean like sure there's different offers and things. But yeah sure. But the execution characteristics of each of them are different. So you use everyone uses the same desk. 
and then everyone sort of farms out to multiple threads, right? I feel like we're talking past each
+
+[38:00] other. I mean, yes, one goes in order after the other; the two of them do get added together to represent the total time. But you can't trade time on one of them for time on the other; that's what I'm saying. I think I'm not quite following, but that's okay. Nico, are there any other networks and fee systems that introduce a split, or that work similarly? Those are the two big things that I isolated: execution time, compute, and disk. The write side... I figured the rest is probably not needed, so I put a flat fee for the other ones. Basically, in the proposal I'm saying
+
+[39:00] we're okay with the following: if you have a transaction that is very important and happens to emit a lot of meta, for example, then you have to overbid, maybe like crazy, on your compute, even though that's not really what it's about; you have to find a way to get it prioritized. Let's see, where are we... okay, something I wanted to talk about: in your proposal you're talking a fair amount about state expiry. Yeah, before we go into state expiration, which I know is a big topic, I'm still trying to reason about the wallet experience and user experience of having these multiple dimensions for gas:
+
+[40:00] what is the expected behavior here for wallets? To be clear, Tomer: single dimension or multi-dimension, from a wallet point of view, if you want to estimate, it's the same problem. In the single cost model, you aggregate everything into one; you have a function that just aggregates, but you still have to estimate your bid for each thing. So it's actually a funny thing; the problem is the same, except maybe the tools you have for discovering prices are not as great, because it's all implicit. Okay, so a wallet can do a preflight, which can tell me the expected cost, and I can
+
+[41:00] bid a bit over it. But how do I know how to divide that between the compute and the I/O? Well, like I said, that question is not a question about multi-dimension versus single dimension, because if you want to say "I want to pay 10 percent more on top of the market rate for storage," say, because that's where there is contention, you have to know the market price for storage. If you just layer 10 percent flat on everything, you're just going to overbid,
+
+[42:00] which is maybe okay for some people, if the fees are relatively low; what's the difference between half a lumen and two-thirds of a lumen, or something. But historically we've seen that in some situations people are bidding very high for certain patterns. It would be great, and forgive me if it's already in the CAP, to just understand what the expected wallet strategy or client strategy is here, in terms of user experience: what do they present to the user,
+
+[43:00] and what kind of inputs do they expect from the users? Yeah, sure. Okay, let's talk about state expiration. Nico, where were we? Well,
so state expiration goes kind of hand in hand with the model that I have there for storing data on the ledger. In the proposal, there are basically two parts to it. There's how you model a write, and a write can be a create of a ledger entry or an update, and how we get the right price for the cost of storage. In the proposal, what I did is I basically used, as an approximation for
+
+[44:00] the cost of storage, the bucket list size. The ledger is organized into those buckets, I think it's 19 of them, and if you do an update or a create, you basically append that to the very first bucket in the bucket list. So, based on the total size of the ledger, I allocate a price function that kind of looks like an exponential: basically, it starts with a slow slope up to some number. The
+
+[45:00] validators are determining those parameters; you can think of it as the validators saying, "right now we are running on drives with, I don't know, 25 gigs or 50 gigs of space," and they're going to set parameters such that they don't have to buy new drives if there's too much traffic. So the price function basically looks like: you have your normal slope; say you have 100 gigs, and I want to use the first 80 gigs at a rate that's going to be a good rate but not overly aggressive, and then for the last 20 gigs I want to
+
+[46:00] really slow down the growth. So the price, as a function of size, looks like this hockey stick type of shape, like an exponential, and that's the model for pricing growing the bucket list; that's for writes. Then the problem is that this is only saying, okay, you can add to the bucket list. And by the way, if you delete entries, eventually those get collapsed into the buckets, and so the bucket list shrinks. In that model, a delete you still pay for, actually, because a delete is actually adding a little bit of data to the bucket list. So that's the first thing to note here.
+
+[47:00] Then, what I wanted to get to here, as a more desired property, is that I want the price of storage to be kind of the same for everybody, regardless of whether they created an account two years ago or in five years; it should be the same cost over time, and there should be no way to have a free ride on the ledger, where you're able to store, I don't know, NFTs, JPEGs, whatever, on chain, paying for it when storage is cheap, and now you have something that is cheaper than even storing it in AWS. That doesn't make any sense.
+
+[48:00] So with that said, there's then a need for some way of resetting, in a way, the price of storage over time, and the mechanism that I use there is state expiration. State expiration here means that you have to pay the market price of storage to maintain a ledger entry live in the ledger.
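+
+The write-price "hockey stick" described at [45:00] could be sketched as a function of bucket list size. The shape is the point here; every number below is made up:
+
+```ts
+// Toy write-fee curve: roughly flat while the ledger is below a soft
+// target, then growing rapidly toward a hard cap set by validators.
+function writeFeePerByte(bucketListBytes: number, softTargetBytes: number, hardCapBytes: number, baseFee: number): number {
+  if (bucketListBytes <= softTargetBytes) return baseFee;
+  // Fraction of the remaining headroom already consumed, in [0, 1).
+  const pressure = (bucketListBytes - softTargetBytes) / (hardCapBytes - softTargetBytes);
+  return baseFee * Math.exp(8 * pressure); // steepens sharply near the cap
+}
+
+const GIG = 1 << 30;
+console.log(writeFeePerByte(40 * GIG, 80 * GIG, 100 * GIG, 1)); // 1 (cheap)
+console.log(writeFeePerByte(95 * GIG, 80 * GIG, 100 * GIG, 1)); // ~403 (expensive)
+```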
+If you do not pay this rent, your entries basically get purged; that's the choice that I made in this CAP.
+
+[49:00] There are a bunch of other ways this can be done, which are actually mentioned in the CAP under the alternative approaches, but the reason this works with the other mechanism is that if you set a policy where, by default, you have to pay rent, a refresh, every year, say, and then you don't pay your renewal after a year, your data gets deleted. So there's this kind of constant churn on the ledger, which is a new pattern, and that construction is basically a way to guarantee that everybody, over the last year, has been paying something that is
+
+[50:00] market rate. How long do you expect... like, when I create a ledger entry, how far out is the maximum expiration date that I can choose, or that will get chosen for me? As far as I can tell, it's not specified in the CAP how that works. So right now, what the CAP says is that you can renew indefinitely; the renewal window is determined by validators. That's why I said it's like saying every year you have to pay rent, and then every time you write, you do an append that is valid for a year. Right, okay, that's the model. Then,
+
+[51:00] isn't there a natural trade-off between the renewal time and fluctuations in the price of storage, or are we expecting the storage cost to not increase too quickly? Well, it depends. What we've seen on the current network is that ledger size has been increasing rapidly over the last few months, because of some strange token activity. There's a combination of factors: one is the price of crypto assets going down, but also, when they were still pretty high, you had an incentive to create more crypto assets. So those things kind of cancel each other out, and the growth has been pretty significant.
+
+[52:00] So I would say, seeing a growth rate that takes you to... you want the market to get to an equilibrium, where you do not have those weird use cases appearing on the network if they are cheaper than elsewhere. Right now on the network, the problem we have is that we are cheaper than AWS in some situations. Isn't there, not necessarily a trade-off, but isn't there a consequence here that people will have to go and touch their data from time to time, and people are procrastinators?
+
+[53:00] Let's say the expiration date is a year in the future or something; everybody at the end of that year then has to go and touch all their data, and there's going to be a huge logjam to get it done. Well, there would be a large jam if everybody created their stuff at the same time, but that's not the case: the thing that expires in a year is whatever happened today. But imagine that today you have a day with a lot of activity. You can look back historically at Stellar's history, and there are periods when there were lots of token creations; there are days when hundreds of thousands of ledger entries were created and then abandoned.
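+
+A minimal sketch of the rent mechanic as described so far, assuming hypothetical names and a one-year renewal window; nothing here is taken from the CAP itself:
+
+```ts
+// A ledger entry carries an expiration ledger; anyone may pay the
+// *current* storage price to push it out by the validator-set renewal
+// window. Unrenewed entries become eligible for purging.
+interface LedgerEntry { key: string; expirationLedger: number }
+
+const RENEWAL_WINDOW = 6_307_200; // ~1 year of 5-second ledgers, illustrative
+
+function renew(entry: LedgerEntry, currentLedger: number, payFee: (amount: number) => void, currentStoragePrice: number): void {
+  payFee(currentStoragePrice); // rent is always at today's market rate
+  entry.expirationLedger = currentLedger + RENEWAL_WINDOW;
+}
+
+function isLive(entry: LedgerEntry, currentLedger: number): boolean {
+  return entry.expirationLedger >= currentLedger; // otherwise purgeable
+}
+```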
+Well, many of them are abandoned, but you could imagine a world where they're not all abandoned, right? And then what happens a year in the future? Well, nothing; people are incentivized to come back some time between now and then. I don't
+
+[54:00] see this as any worse than the fact that we have to handle load spikes in general. Yeah, we have to handle load spikes, and load spikes may get replicated. But what I sketched, or what we sketched, in the CAP, which is more of a strawman, because I'm sure we can do better than that, actually ensures that you do not have giant spikes. I think the spike would not come from situations like what you mentioned, because, for the activity from today, if you have a linear translation, all this activity gets translated to exactly a year from now, and you don't have a problem. It's just an additional cost of running a validator:
+
+[55:00] when I set my limits, the number of writes, I actually have to think, well, my write capacity is half of what I can add to the ledger, because I also need to delete. What can happen, though, if we have different expiration times, which I briefly talked about, is that you can have different writes that end up expiring on the same date, and for those types of situations you have to have an algorithm that smooths things out, so that the system doesn't create a gigantic spike
+
+[56:00] at a specific date. So, we don't have a lot of time left, but I just want to ask what I think is the biggest question here: what is the expected behavior? These ledger entries represent financial instruments, say assets, for simplicity: we have a standard asset contract and I'm holding shares of something. What is the expectation? Am I supposed to come and touch this once a year? Is the operator of this financial instrument supposed to do that for me? If you look at various common immutable contracts, like Uniswap, and I'm holding these UNI tokens, what's the expectation here? Who's going to touch these for me?
+
+[57:00] Right. So in the CAP I actually left this flexible: there's a special host function that you can call that is basically equivalent to rewriting a ledger entry, so that you refresh that expiration time. Anybody can do that. I understand that anyone can do that, but who do you expect to do that? Well, it depends on the type of user. Power users probably don't want to do it themselves; in other situations, if you're this very passive type of person, maybe you should pay somebody to maintain your stuff, if that's what you really want. In other situations, I suspect,
if people are not active, they probably should just be using centralized
+
+[58:00] infrastructure, you know, contracts or whatever that are a little more centralized. I think Tomer's point is that there's a free-rider problem here: imagine that all the people in this room are using a single contract; which one of us is going to touch it? All of us have an incentive to wait until the last second and play chicken, and hope that somebody else... Yeah, for a shared contract... If it's a shared thing, that's actually not what I asked, Jon; I assumed that each of us would have our own ledger entry within that contract, so maybe I didn't understand correctly. I think I'm concerned about people's money vanishing into thin air, which is completely reasonable: these are balances, and we're just going to delete them; that's not super great. I recall there being a proposal where,
+
+[59:00] when you have a ledger entry deleted, it gets dumped into some kind of Merkle trie, and you store the root hash of that trie, and then when I want to bring it back, I can bring in a proof that this is what the state was. So this is actually in the appendix, in the alternatives section. There is actually a very detailed proposal about this in Ethereum. The complication with this archive approach is when you want to... so, restoring is actually trivial, like I said: you have a way to store that entry inside a Merkle trie of sorts, and then you just need to provide the proof for it. The complexity comes from
+
+[01:00:00] when you want to create an entry, because you have to prove that the entry doesn't exist in the historical data that was archived, and that gets really nasty very fast. Sure, but not wanting to do that doesn't address the question: what do you do if your money gets deleted? With an issuer, it's like today: if, on the Stellar network, you send back to the issuer, you basically burn it, and you can ask the issuer, hey, sorry, I didn't mean to burn that. But what if it's not an off-chain issuer that you can appeal to? What if your Uniswap LP tokens get deleted? What do you do? It's tough. Yeah,
+
+[01:01:00] those are the rules of the network. Wait, but that's not a great solution long-term, right? We're going to have a lot of people... Consider the alternative, which is infinite growth of the ledger with infinite price; which one do you prefer? I mean, objectively, if somebody had a million dollars of Uniswap LP shares get liquidated, it would have been better to pay for a million dollars of storage, so that one person... Yes, but what about everybody else? And that person with a million dollars: if they have that, it's kind of like key management; you have procedures to make sure that you don't lose your million dollars. It is currently the case that people with million-dollar balances can in fact lose them, because they can lose their keys. So there is something to appeal to here: it is actually possible for you to lose money just by
+
+[01:02:00] misusing the system.
But what about the other end of the spectrum, though? Somebody who doesn't have a million dollars; they have a small balance, and they're just constantly eating it up by paying these fees to keep the balance alive. It sort of reminds me of bank accounts, where bank accounts just disappear because you're paying all these fees. Yeah, they do eventually disappear: if you put five dollars in a bank account and then wait 20 years, it'll go away. I mean, this just points to: you're not storing your balance in the right place. This is shared infrastructure; if you don't use it, you lose it. But Nico, you can't just ignore the entire industry that we're in. I'm not pretending that this is not a problem, or that people are overlooking this problem, but I think it will be really difficult to bring people into Stellar telling them, oh, this is the way it works.
+
+[01:03:00] It works that way in Solana, right? So... no, Solana doesn't actually do any of this right now. They have rent. No they don't; they literally don't charge it right now. It's a to-be-done-in-the-future feature that no one wants, so they're never actually going to get around to doing it. They have an infinite ledger in memory; they charge you money to allocate space, but they never actually reclaim it; there's no active garbage collection process. So, we're over time at this point, and I think that means we have to stop. I know this is an interesting conversation, and it seems like there's a lot to say about the concept of expiration, but I think we'll push it to next week's meeting, and hopefully have some of this discussion on the Stellar dev mailing list, and here in Discord, in the various Jump Cannon channels. So if anyone is watching and has thoughts about that, feel free to join the Stellar dev mailing list or to
+
+[01:04:00] chime in on the Discord here. We'll continue to share work and ideas and conversations and debates as they happen, and we will see you here again soon. Thanks, everybody.
+
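+
+As a footnote to the archival alternative discussed around [59:00]: restoring an expired entry amounts to verifying a Merkle inclusion proof against an on-ledger root, roughly as sketched below. Proving non-membership at create time, the part called out as nasty in the discussion, is not shown. All names here are illustrative:
+
+```ts
+import { createHash } from "node:crypto";
+
+const sha256 = (data: Buffer): Buffer => createHash("sha256").update(data).digest();
+
+// Walk a Merkle path from a leaf up to the root kept on ledger.
+function verifyInclusion(leaf: Buffer, proof: { sibling: Buffer; left: boolean }[], root: Buffer): boolean {
+  let node = sha256(leaf);
+  for (const step of proof) {
+    node = step.left
+      ? sha256(Buffer.concat([step.sibling, node])) // sibling on the left
+      : sha256(Buffer.concat([node, step.sibling])); // sibling on the right
+  }
+  return node.equals(root);
+}
+```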
diff --git a/meetings/2022-07-01.mdx b/meetings/2022-07-01.mdx new file mode 100644 index 0000000000..a64e5c54a9 --- /dev/null +++ b/meetings/2022-07-01.mdx @@ -0,0 +1,188 @@ +--- +title: "Single Balance v. Double Balance Showdown" +description: "A deep design discussion on whether Soroban should expose Stellar assets through a single shared balance or a dual-balance model, weighing UX, security, performance, and ecosystem compatibility." +authors: + - alex-mootz + - david-mazieres + - dmytro-kozhevin + - eric-saunders + - graydon-hoare + - jake-urban + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: + - soroban + - CAP-46-5 + - CAP-49 + - CAP-52 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session centers on a critical Soroban design decision: how classic Stellar assets should be represented inside smart contracts. The group compares the existing dual-balance approach—where assets can exist both in trustlines and contract balances—against a proposed single-balance model that exposes one canonical trustline balance through a built-in Soroban token interface. + +Much of the discussion focuses on trade-offs between simplicity and isolation. Participants explore implications for wallets, Horizon, security approvals, trustline semantics, performance, and long-term ecosystem evolution. While open questions remain, sentiment throughout the room trends strongly toward adopting a single-balance default for classic assets. + +### Key Topics + +- Single balance vs. dual balance models: + - Single balance exposes the trustline as the canonical Soroban balance. + - Dual balance keeps classic trustlines and Soroban token balances separate. + - UX benefits of a single source of truth for wallets and users. +- Technical feasibility: + - Single-balance transfers can reuse existing import/export mechanics. + - No major increase in classic-protocol coupling compared to dual balances. + - Performance considerations around additional ledger reads. +- Security and approvals: + - Expanded allowance risks when contracts can pull directly from trustlines. + - Comparison to existing DeFi approval risks on other chains. + - Interest in scoped approvals, permit-style flows, and safer defaults. +- Trustline semantics: + - Whether contracts should require trustlines or implicit opt-in. + - Desire to reduce reserve and opt-in friction for contracts. + - Tension between consistency and minimizing classic protocol leakage. +- Horizon and downstream tooling: + - Single balances simplify balance visibility in Horizon. + - Concerns about centralization and data-model complexity. + - Possibility of alternative lightweight indexing solutions. +- Token interfaces and limits: + - Mismatch between i128 token interfaces and i64 trustline balances. + - Need for concepts like “available balance” vs. total balance. + - Avoiding unsafe assumptions by contract developers. +- Built-in token contracts: + - Debate over maintaining both a pure Soroban token and a classic-asset-backed token. + - Preference for a standardized token interface with strong interoperability. +- Directional outcome: + - Broad consensus to pursue a single-balance model. + - Remaining open questions around opt-in semantics and edge cases. 
+ +### Resources + +- [CAP-0046-05: Smart Contract Data](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-05.md) +- [CAP-0049: Smart Contract Asset Interoperability with Wrapper](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0049.md) +- [CAP-0052: Base64 Encoding/Decoding](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0052.md) +- [Project Jump Cannon: Choosing WASM](https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm) + +
+ Video Transcript + +[00:00] Okay, so let's kick this off. Today we're trying to make a very important decision with regards to how Stellar assets are represented in Soroban. This is a decision that we've been exploring various paths toward over the past few months, and we've gotten to a point where we really need to make a decision and anchor it so that we can move forward. In order to facilitate this discussion we put together this doc, which Justin just shared in the live chat. If you haven't read it, please open it up and take a look at it while we're talking, because it's very important. So the crux of the decision we need to make is between two ideas of how Stellar assets are represented in Soroban. The current implementation we have has this maximum + +[01:00] separation, where Soroban tokens are completely detached from Stellar assets, so you can have a balance both in a Stellar trustline and in a Soroban token balance, and we're trying to figure out whether we want to keep it that way or change to a single-balance approach. Before we touch on what a single-balance approach means, I do want to mention that we talk a lot about wrapped tokens here. "Wrapped tokens" is probably really bad semantics; it's a bit vestigial to how we referred to these tokens when we started off, so we'll probably change the terminology. But I just want everyone to be on the same page that what we're talking about is, you know, what is the Soroban interface into Stellar assets. So I talked a bit about the dual-balance approach. The single-balance approach is + +[02:00] that, basically, accounts have just a single balance in their trustline, and that is also exposed as a Soroban balance through this built-in contract. We put together in the doc a set of requirements and non-requirements. We haven't gotten a lot of feedback on these, so it looks like people are on the same page. And maybe, because the single-balance approach is a bit new, Lee, would you mind (I don't want to poke you too hard) giving a very high-level overview of the single-balance approach, as the original proposer? Yeah, so the goal with the single-balance approach is really to make it so that any participant in the ecosystem, when they're interacting with an asset that exists on Stellar today, held in a + +[03:00] trustline, only has one place to look for that account. So an application doesn't need to know, a wallet developer doesn't need to know, that they need to go and look at balances in two places; they only need to look at that one place. Users don't need to decide which balance they're going to send from or send to, all of those things. But on the same note, the single balance is really mostly focused on Stellar assets. So if somebody goes and makes a token on Soroban, their own custom token, that's a completely separate thing. The idea with single balances is that if you're interacting with that Stellar asset within Soroban, it looks to you just like any other token. So Stellar assets are still exposed in Soroban with the exact same token interface; they look + +[04:00] like any other token, like an ERC-20-style token on Soroban, but we're just storing that balance in the trustline.
So there's just a single location, right. And there's a fork in the second section of this doc around single balances: whether the trustline semantics are only preserved for accounts, or whether we include trustlines for new types of entities like contracts in this approach; we can talk a bit about that later. I think my first big question would be to Siddharth, if you're on the line. I know you've been the main implementer, you and John, for dual balances, and maybe you can share with us, from a hands-on technical perspective: if we are going to move to a single-balance approach, what would be the implications? + +[05:00] Yeah, so John and I took the perspective that we would like to minimize the relationship between classic and Soroban. And I say minimize because we can't eliminate it: importing and exporting actually requires Soroban to interact with trustlines, and there's some pretty complex code in there where we replicate the trustline semantics in Soroban. I'm still supportive on that side of the question. The thing you mentioned about trustlines for contracts, not just Stellar accounts, I think is moving in the wrong direction. If we do single balances we are + +[06:00] introducing more of the classic protocol into Soroban, when it's much simpler to maintain the separation as much as we can. So that specific point is something (and I think Nico has a strong opinion there as well) we can ask about later. Before we go to that, the question is: you mentioned that right now we're replicating trustline semantics in Soroban because we need to interact with trustlines through the import and export functionality. That is true for dual balances, and obviously it will also be true if we go for a single-balance approach. Is there significantly more classic semantics that we need to adopt in order to move to single balances? The answer is no, I + +[07:00] don't think there would be any significant changes. For example, you can actually use the functions that import and export use to implement a transfer in the single-balance world. So in that case it's not like the token contract would change much. I mean, we already have to preflight with the trustlines, right; you're pretty much just running an import as a transfer, with some slight differences. So yeah, the answer is no, I don't think there would be significant differences. Okay, so I just want to make sure we're all on the same page with what you're saying here: in terms of interaction with classic, what we have right now is import/export on dual balances, and you can think of a single-balance transfer as basically these two things combined, like an import and + +[08:00] export at the same time, so there's no added overhead of interaction with classic. No, there isn't. Okay.
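A toy sketch of the equivalence being described (plain Rust, illustrative names only, no relation to actual Core code): a single-balance transfer composed from the same two halves that import and export already perform, a debit of one trustline and a credit of another.

```rust
use std::collections::HashMap;

// Toy ledger: trustline balances keyed by (account, asset). The names are
// hypothetical and do not correspond to real Core data structures.
type Ledger = HashMap<(String, String), i64>;

fn debit(ledger: &mut Ledger, account: &str, asset: &str, amount: i64) -> Result<(), String> {
    let bal = ledger.entry((account.into(), asset.into())).or_insert(0);
    if *bal < amount {
        return Err(format!("{account} has insufficient {asset}"));
    }
    *bal -= amount;
    Ok(())
}

fn credit(ledger: &mut Ledger, account: &str, asset: &str, amount: i64) {
    *ledger.entry((account.into(), asset.into())).or_insert(0) += amount;
}

/// A single-balance transfer is just "export from sender" plus "import to
/// recipient" applied back to back against the same trustline store.
fn transfer(ledger: &mut Ledger, from: &str, to: &str, asset: &str, amount: i64) -> Result<(), String> {
    debit(ledger, from, asset, amount)?; // the "export" half
    credit(ledger, to, asset, amount);   // the "import" half
    Ok(())
}

fn main() {
    let mut ledger = Ledger::new();
    credit(&mut ledger, "alice", "USD:issuer", 100);
    transfer(&mut ledger, "alice", "bob", "USD:issuer", 40).unwrap();
    assert_eq!(ledger[&("bob".to_string(), "USD:issuer".to_string())], 40);
}
```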
So before we go into the details of the specific implementation of single balances, is there anyone here who wants to advocate some more for dual balances? Okay, maybe the only thing I can add about what is good about the dual balance is actually on the performance side. If you think about the type of ledger entries you need in the dual-balance world, when you're doing + +[09:00] Soroban payments you only have to deal with basically one place, whereas in the single-balance world, when you do things like allowances, you're dealing with two ledger entries plus you need to read issuer information from classic. So there are some other added overheads, but we can maybe save that for the performance or implementation discussion. Okay, but I just want to make sure: when we're dealing with allowances today, even if it's pure Soroban, you still need to touch both the balance ledger entry and the allowance ledger entry, right? Yeah, I think the only difference would be anything you would read from the issuer, because you still have two balances you're going to transfer on the Soroban side, and on the classic side you have two trustlines. Yeah, so you have the one- + +[10:00] time transfer, right. As far as the balance, where we store it versus the allowances, there is actually no requirement that they are separate; in the current implementation you could actually store them in the same place too. That's true. You're saying you would have one ledger entry with all the information? Yeah; that's not how it works at the moment, but that is true. I think that's a bit weird, because balances operate on a key of a single account and allowances operate on two accounts, who owns the balance and who gets the allowance, right? It doesn't matter; when you do a payment as part of an allowance, one of the endpoints is basically holding the allowance, so you pick which side, but it doesn't need to be a third entry. But also, just to zoom in on that, Sid: if we want to adhere to + +[11:00] current authorization semantics, even with the dual-balances approach, isn't there a read to the issuer account anyway? Yeah, if you want to check auth-required, for example; is that what you were talking about? Yeah. There is the issuer read, although you can minimize it: at least in that prototype I wrote up, you only need to check when it's a new balance, and after that, auth-required doesn't matter. But that's a minor implementation detail. Does that make sense? Yep, that makes sense. I see that Eric is raising his hand; maybe you can invite him to speak. Eric, you're invited. Hey, can you hear me? Yeah. I just wanted to ask: are there any security implications in terms of surface area in the single- + +[12:00] balance approach? Because in the dual-balance approach it feels like classic assets are pretty separated from smart contracts: they can't be interacted with directly, and so on. So in terms of things we may not have thought of, bugs that could be introduced,
and so on, are there any security implications to going with a single balance? By the way, this is a question that Fred from Lightman was asking in the live chat too. Yeah, I think that's a really good point; I think it's in the doc somewhere, but just to give some more context: smart contract balances are susceptible to various approval issues. Let's say, for example, that either maliciously or because of an error, a contract asks me to approve more funds than it actually needs, and it somehow manages to get me to sign that + +[13:00] approval. Now it can access funds that, logically, I don't want it to have access to, and that's definitely a problem, an issue that, with the clear separation of classic and Soroban, doesn't exist. And by the way, this is a problem in blockchains in general and in DeFi. I do think there are user actions that can alleviate some of this: as a user I can have different accounts, one for the "risky" stuff, quote-unquote, that I'm doing in Soroban versus the less risky stuff I'm doing in Stellar. But the protocol doesn't mandate that separation. + +[14:00] But yeah, I think that's a super important perspective here. And maybe I'll add that in terms of risk, either implementation actually has a way to manipulate the trustline, the balance on the classic side: either it's through this bridge, the import/export type of function, or it's when you do a transfer. In either case, if you have a bug, you're in trouble. So from that point of view, implementation-wise, I don't think it has much consequence. Yeah, I agree with you: from an implementation perspective it's the same surface area, but to Eric and Fred's point, the approval + +[15:00] surface area is a bit more rope for people to hang themselves with. But that's not only part of this conversation, because at the same time I do want the Soroban tokens to be secure, right. So I think this is part of improvements we can make to the contract interface: there are obvious flaws if you were taking verbatim something like ERC-20. Yeah, I know we already started to incorporate some of the changes that were made later, but we don't need to stop there; it's something I think we have to double down on before we launch this thing. And here's a great shameless plug for some of the work we've been doing on our token contract, with support for things like permit-style approvals, in which you only approve the specific amount that you'd like your DeFi + +[16:00] protocol to use at the time. So I do want to go a bit further down the document and talk a bit about this trustline-yes/trustline-no discussion we've had. I know that the original proposal Lee made for a single balance was to preserve trustlines only for accounts.
But contracts and other non-account entities would not require a trustline, which means they don't have a minimal reserve and they don't need to opt in to hold an asset. There's another fork in the road we can take, which is to mandate trustlines across the board, even for non-account entities like contracts and other identifiers. Lee, can you tell us a bit about why the original proposal + +[17:00] did not have trustlines for non-account identifiers? Yeah, I guess it just didn't seem particularly necessary. Accounts have trustlines today, and so they have a balance that's there, and everybody knows where to go to find that balance. When contracts exist tomorrow, or now, on futurenet, that's an entirely new thing; there's not really any need for them to depend on trustlines, and we don't expect them to inherit all the semantics around trustlines. So, yeah, there didn't really seem to be an obvious advantage to doing so, but maybe there's some implementation reason to do so. + +[18:00] Got it. And I will mention that there is a non-requirements section in this doc, and a later addition to the non-requirements is that opting in to hold a Stellar asset is not a hard requirement. Today you need to explicitly create a trustline in order to hold an asset, and we are loosening this requirement for the sake of interop. To be honest, we haven't gotten any qualitative feedback in either direction, whether this is really good or really bad, but we do know that trustlines have been an extreme pain in the ass for a lot of us throughout the years, and we are looking for ways around them, which some of these proposals, including 2.1, address. And yeah, this is still an open question, but + +[19:00] we're probably going to loosen that requirement. Nico, can you talk a bit, from a technical perspective, about the implications of not having trustlines on contracts, and why you think we should have them? So, yeah, when I say trustlines here, by the way, I didn't necessarily mean a classic trustline; what I'm thinking about is more of a standardized format, a ledger entry or whatever, for holding balances. And this would be so that, when we think about wallets: right now, the way you can get a balance for a Soroban token is by calling the get-balance endpoint on the token, + +[20:00] and there is basically no way to do something efficient, block-explorer style, like "give me the balance for any ledger entry or any account", a Soroban account. So what I was thinking is that maybe there's room there for that. In particular I was thinking in the context of classic, so classic assets, existing assets, being held by, let's say, a smart contract wallet: basically where your balance is not held behind a Stellar account but held by a contract that enforces policies. In that world you don't have the Horizon experience that you get with a + +[21:00] classic account, a standard account, and that felt a bit limiting from an efficiency point of view and all that. I mean, I can see that if you get into generic types of contracts, you can't do a lot better than calling the actual get-balance,
because there may be actual code happening there. But for those types of holdings, the classic assets, I thought we could actually do something a little bit closer to what you get with standard accounts, if that makes sense. So I think this touches on how downstream systems handle tokens. I think it's still an open question for Horizon, for example, how they deal with tokens. Eric, do you see + +[22:00] Horizon actually reporting token balances for contracts? Like you say, I think that is still an open question we're thinking about; there are pros and cons here. Obviously, if we go down this road where assets and tokens become the same thing, then you sort of get a default answer to that question. But in theory you can imagine a world where there's some kind of standardized token that obeys the interface of the standard contract, and we make that available. The question I want to understand, if we're going to go down that road, is what the use cases are, and whether we can solve those problems in other ways, say through streaming endpoints and then fast catch-up for history. So I think we're still trying to understand the use cases. And with respect to Horizon in general, there's this question of what Horizon solves and for whom. Horizon is like a Swiss Army + +[23:00] chainsaw: you can do everything. It turns out that when you actually analyze what people use Horizon for, the vast majority of users are using a very small number of endpoints, and over a relatively recent time period. So we have the possibility to differentiate with different products that map much more cleanly to the actual requirements. That's part of where we're trying to understand better what the best form factor will be. So I don't have an immediate answer, but it's definitely something we're actively trying to understand more. I love "Swiss Army chainsaw"; I think that should be the description on the GitHub repo. So it does seem like, if we're thinking about downstream systems, going back to the big discussion of single balances versus dual balances, single balances is the least amount of friction to actually get Horizon to keep accurately reporting + +[24:00] the classic account balances. Yes and no. Yes in the sense that, on the face of it, it feels like there will be almost no work required; it will just automatically happen. The complication is thinking about whether having it automatically happen is, in the end, a net positive. To give you one example of a trade-off we have here: when Horizon provides token balances natively, that actually encourages some centralization in the ecosystem, because in practice today most Horizon users actually use the SDF Horizon instance. Another example: if we actually identified the need for tokens, we could produce a form factor that's extremely minimal, that lots of people could run very cheaply, and that might actually aid our decentralization goal. So there are some questions around that + +[25:00] kind of stuff that I would like to put some ideas on the table around. But it is true that, from our perspective,
if balances are combined, they would just pop up and appear in Horizon. And I do see a benefit from a usability standpoint: it is pretty confusing to consider tokens and assets as separate things. Yeah. I imagine you'd still have to account for transfers that go in and out of things you're not tracking, like contracts. So yeah, that's true, but that's a much more bounded problem. Yeah, I hear you. Sid, you were talking a bit about your concerns with adding opt-in semantics to contracts and other Soroban entities; can you tell us about them? Yeah. For example, if you + +[26:00] have a liquidity pool that's going to allow trading with a classic asset, the contract would have to opt in, right; that's friction. It sounds like this won't be an actual trustline, which is where some of my concerns were, but I had questions like who would own the trustline, and it just adds friction that I don't think is necessary. It's something I talked to Nico briefly about: if you really wanted the balance of a contract, or the token balances for a token contract, you could actually look at the ledger entries, and maybe this is something we can encourage as an option, because the structure is pretty well defined, at least for the built-in token contract. So this sounds like something we don't need to do; it's pretty much what Lee said: accounts already have trustlines, and + +[27:00] that's where the balances live. Special-casing the balances for contracts, unless we have a really good reason to do it, I don't see why we would. Got it. Another thing that is an open question for single balances is whether or not we make them compatible with classic Stellar operations; for example, can I just make a regular payment to a contract? This is actually in the non-requirements section, but it's still an open implementation detail, and I was wondering if people here have any opinions in the space. I mean, to me that seems like a pretty good thing to have, just because you can imagine a situation where maybe there's some contract that's so successful that we + +[28:00] want to implement that functionality in classic and make it super efficient or something. So the more compatibility you have, the easier that is to do without upending things, the better. So I have concerns about that, David. I think one of the concerns is: what is the use case? Usually when you send funds to a smart contract you actually want to do something on the smart contract side. For example, I don't just send funds to Uniswap to get a swap; I send funds and I facilitate a swap. So just letting classic operations send a payment to a contract doesn't necessarily save me the need to communicate with that contract on the Soroban side.
If classic operators on the network think exchanges for example have to adapt their currently supported operations and you know this is something, that we've had a lot of pain with dealing with exchanges I'd support for muxed accounts most of them still don't I think, that's a like a strong argument to you know to keep the you know the smart stuff on the smart side I don't think, that's necessarily what David was saying it's like he like I think the idea is more like. If you have scenarios, that are very high value you don't want to necessarily have this kind of like concretely like Frozen + +[30:00] you know like a wall between what is classic today and Soroban like I think over time we want to kind of have some ways to integrate those a bit better and this doesn't necessarily mean like you have to for example use the same data you know like format in you know in behind you know the scene. But like you need to kind of think ahead in terms of what type of interactions I want to enable in the future like for me like the deposit into a contract like in the shot you know like in the near future I would say yeah it's for something like an mm no use case right or like those optimizations right away you can think of like maybe it's like an actual deposit and do stuff right, that we under implementing natively. But like I do think, that in the short term those type of depositing into a smart contract is actually useful. If that smart + +[31:00] contract is a wallet. So I guess I've heard this brought up a couple times and I like this idea of a smart contract wallet and this is kind of the like concept, that concerns me. Because the whole. If the whole thing, that we're debating is you know single balance versus dual balance and our solution to single balance is to keep Stellar Assets in Stellar trust lines. But we could also still have this smart contract wallet, that bypasses trust lines and it sounds like we could end up in a world where there are still two balances like we could end up in a world where even. If we go with the single balance approach we end up in a world where there are dual balances and applications still have to look in two different places I'm following like I think what you're saying is. Because you have to. When you + +[32:00] think of like this the contractors or what it I think this is actually something we're discussing actually in a different context in the existing protocol, that was we wanted to decouple your identity on the network in today on the network is kind of your master or public key right and often. And so in something else you have like different Keys attached to, that thing and you actually disable your master key. And then your identity is kind of weird. Because it's this you know public key, that actually doesn't mean anything. So at some point we were talking about hey maybe we should have accounts like your actual identity is like you know a hash or something more deterministic and. And then you the way you authorize using this account is actually a separate topic right and I think in Soroban you have actually a similar analogy there where your identity your account is actually + +[33:00] the contract or some maybe like some other like Alias on top of, that. But basically your you have, that as an identity as in. And then how you use it right how you withdraw funds or you know platform transactions with, that with, that account is actually the managed by the account. 
So you can have very flexible types of policies around how the account works. I don't know if you looked at... I mean, this is a topic for a different day, because we've actually been thinking a lot about authorization and authentication, and I don't think we're ready to discuss it in this meeting, but there are things that are highly inspired by the type of research that has been done in Ethereum with account abstraction and things like that. Great. So, if I try to summarize what Nico is saying: there are ways, + +[34:00] if users choose to, in which they can have multiple balances under the same signer, whether that's multiple accounts with the same signer or multiple smart wallets with the same signer on the smart side. The question is what the default behavior is: do we default to two balances, like in the dual-balances proposals, or do we default to a single balance? Okay. I am going to say that the responses we have been getting tend overwhelmingly toward the single-balance approach, and so I just want to open it up for anyone else who's in + +[35:00] the crowd right now, and there are a lot of people: if anyone wants to talk in favor of dual balances... Okay, hang on, go ahead. Hi, Kyle here. I just have a general question around dual balances versus single balances: if we think about use cases down the line for Soroban, either at an enterprise level or a consumer level, is there any reason why the double balance would make more sense to enable other use cases? I just want to ensure, as we're discussing this, that there are no limitations to use cases dependent upon this decision. So, I do want to mention that if you + +[36:00] look at the doc, at the top section, there are semantics where you can create pure Soroban tokens that either adhere to our standard interface or adhere to whatever weird interface you want; none of this actually prevents you from doing a pure Soroban, whatever crazy asset you can imagine. The context of this discussion is the standard assets that are issued on Stellar, Stellar assets that are already supported in various exchanges and custody solutions and wallets, and, for these assets, what capabilities we have on the Soroban side. Now, for dual balances: if you look at the pros section for dual balances, there are several things there. For example, you can have a much bigger balance, because the standard token + +[37:00] contracts have an i128 integer type instead of the signed 64-bit integer we have in Stellar assets. So there are some small things there, but generally speaking, from a capability perspective, I don't think we're losing anything significant by moving to a single balance. Thanks for the answer. And maybe something I will add is that one of the things that is actually a big TBD (I think it's in the last section, open issues) is that when you move to this single balance, you have to support some of the quirks that you know from + +[38:00] the classic tokens. In particular you have the balance question: what is the balance of the account?
Because there is a difference in what you want to show in a UI depending on the scenario: is it the usable balance, or is it your actual net balance, accounting for liabilities? So this will actually have consequences on the token interface, and because of that you end up taking on these types of additional requirements for any token you want to issue on the network, even if it's not a classic token. Those other tokens basically need to implement, if we need a concept of liabilities for example, they need to implement that, and maybe they + +[39:00] are no-ops, but you still need to think about that stuff. Yeah, so to your point, Nico (I commented on the Google doc about this), the idea of having a get-available-balance on the standardized token contract doesn't seem like a huge lift. And for standardized, sorry, native tokens on Soroban, the value returned from get-balance and get-available-balance would effectively always be the same, as long as it's the standard token contract. But I do agree; personally I think this is a pretty easy adjustment we can make: just a get-available-balance that subtracts liabilities from the return value. I mean, this is one of the ones we know about; as we go through designing this, I think we may find more of this type of stuff. + +[40:00] If you want to write safe code, you need to ensure you're not overflowing in different ways, which is something you don't necessarily need to care about as much when the interface is in i128, whereas here the balance is hidden as a 64-bit number behind the interface. So you have to deal with special checks, and in order to perform those checks, if you don't want the payment to fail later, and you want, as part of preflight or some other check, to ensure that the payment would work, you need to know what the safety constraints are on your balance. I think this is the type of stuff we'll have to go through. + +[41:00] Yeah, and I think this is an important con that is in the doc but we haven't discussed today, which is this idiosyncrasy where single balances, in practice, when they're in a trustline, are constrained to a 64-bit integer, but the actual interface we're discussing is a 128-bit integer. So you have failures that are unexpected just by observing the types. But it's important to recognize that those failures can happen anyway: if you have a near-max 128-bit value in your contract data and you try to add a value that would overflow, you're going to get an error. So it's not like this is a new error case; it's an existing error case. It's just that + +[42:00] if you have a less-than-64-bit value in there and you add another 64-bit value, you might assume that would succeed, and it might fail. So, exactly: the contract code is written such that you write checked code, basically, using 128-bit arithmetic.
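A minimal sketch of the kind of guard being described, in plain Rust with hypothetical names (this is not the actual token interface): a trustline-backed token speaks i128 at the interface but must respect an i64 balance and its liabilities.

```rust
/// Toy trustline state: balance and selling liabilities are i64, as on classic.
struct Trustline {
    balance: i64,
    selling_liabilities: i64,
}

/// "Available balance": what can actually be spent, net of liabilities.
fn available_balance(tl: &Trustline) -> i64 {
    tl.balance - tl.selling_liabilities
}

/// The token interface speaks i128, so a debit must check both that the amount
/// fits in i64 at all and that it doesn't exceed the available balance,
/// checks an i128-only token wouldn't need.
fn checked_debit(tl: &mut Trustline, amount: i128) -> Result<(), &'static str> {
    let amount: i64 = i64::try_from(amount).map_err(|_| "amount exceeds i64 range")?;
    if amount < 0 {
        return Err("negative amount");
    }
    if amount > available_balance(tl) {
        return Err("exceeds available balance");
    }
    tl.balance -= amount;
    Ok(())
}

fn main() {
    let mut tl = Trustline { balance: 100, selling_liabilities: 30 };
    assert!(checked_debit(&mut tl, 80).is_err()); // only 70 is available
    assert!(checked_debit(&mut tl, 70).is_ok());
    assert!(checked_debit(&mut tl, i64::MAX as i128 + 1).is_err()); // doesn't fit in i64
}
```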
But then, when you're actually interacting with those other contracts, you get some other failure mode, so your checked code is now irrelevant, basically. Lee, were you trying to say something? Yeah, I think it's a really good point. Over time, once there are custom tokens, or new forms of tokens, + +[43:00] they're going to have new failure modes too. So I'm not really sure how much contracts should, or can, make assumptions about that sort of thing anyway. But if we're talking about Stellar assets, and everything is just a Stellar asset, then I could definitely see people making those assumptions, so yeah, that could be bad. Got it. And Lee, after Nico talked a bit about why he's interested in adding the opt-in semantics, even for dual balances, are you persuaded in any way? Lee, you're muted. Yeah, I feel like I want to think about it a bit more, but the second model, I think the arguments for it are + +[44:00] compelling; it's just the i64 and i128 issue that gets me. Got it. Okay, Justin, do we have any questions from the audience? Well, first of all, if anyone in the audience wants to speak, you can raise your hand and we can bring you onto the stage. There is actually a fair amount going on in the live chat channel, although a lot of it has been answered in one way or another, either via text in the live channel or because people are talking amongst themselves; I'm trying to look through to see any that haven't already come up or been answered. Got it. So there is one question I want to ask the group. This is not actually part of this specific decision, but as we're talking about + +[45:00] the capabilities of this single-balances token contract: there are two built-in token contracts we're talking about right now. One of them is a pure Soroban token contract implementation; the other is this wrapped token contract, really just an interface to classic assets, which, by the way, is currently implemented as an extension of that first contract. Should we just ditch altogether the concept of a native, Soroban-only, built-in token contract in favor of the Stellar asset one, or should we maintain both? The argument (this is something Nico's brought up before) for keeping it is that the pure Soroban contract would be more + +[46:00] performant: you don't have to spin up a separate WASM VM just for the invocation into that contract. But it's not necessary, because we only need to make the classic one native so you can run the import and export. So we could remove it; it's just a question of whether anyone has concerns over the performance cost of running the pure Soroban contracts, in which case this matters. But if you try to think about the spectrum of contracts that might be interested in the Soroban-only built-in implementation: it's contracts that are not interested in being a Stellar asset for some weird reason, even though Stellar assets have more ecosystem-wide support. So they want something that is not in the mold of the Stellar asset, which might be i128 instead of i64, + +[47:00] or it might be because they're interested in a custom decimal value that's not seven.
But at the end of the day, besides that, what are they getting? Why should they do this? Yeah, they may just not be interested in classic Stellar support, and I don't know how many people would fall into that category, but that's one reason. And we're not saying you can't do it; it's just that if we remove that contract, then you'll basically be deploying your own contract, and it's less performant. The question is: is this important enough to first-class? Lee, you unmuted yourself. Yeah, I feel like we always place some value or importance on interoperability, and so I think, if you're making a token that will interop with Stellar classic assets, + +[48:00] the network can be opinionated and say, well, if it functions like that, it should just work, it should just be interoperable. Yeah, I really don't see the advantage in even making that an option. We can keep it this way so it's flexible, and anybody can choose to say, no, I don't want it to also be available on Stellar, but that just harms interoperability. Yeah; if they have completely different functionality where they're incompatible, then that makes sense: they're only going to operate on Soroban and they're going to have a great time there. But join the team here, Mootz: from a dapp developer perspective, you probably have the most experience with Soroban and + +[49:00] the various token contracts. Any thoughts? Yeah, and my thoughts might not be super clear yet, but from a pretty high level, I guess my expectation would be that if I'm going to attempt to interact with anything on Stellar, I would at least expect the way that works to be consistent. Not having thought about this too long yet, option 2.2 speaks the most to me at the current moment, and I think the main reason is that it keeps the most separation (which, Sid, I know you disagree with) between Stellar and Soroban, mainly because if I'm ever going to touch anything that's Stellar, it's never actually going to leave Stellar classic's control: it's going to be a trustline or some other trustline-esque data source that'll live on Stellar. And if I ever want to interact with something that's Soroban- + +[50:00] focused, that can just exist on Soroban by itself, and that keeps a clear line between the two. There was one comment I made on the doc: I think things for users especially will get really confusing if you're in 2.1 or option one, which is either dual balances or having some ability to hold a balance on Soroban of a Stellar classic asset. If we have state expiration, that can get really weird for users to keep track of, and trying to, er, think through some way you can present that and make it clear to users is quite confusing. And then, just as a little second part, during the conversation after I popped up here: I don't really see a reason why we can't have two separate token contracts that are actually going to run in Rust and be performant, one specifically to interact with assets that are on Stellar classic, so it'll be able to manage trustlines, and one that's just a reference implementation + +[51:00] of a basic, quite usable token.
So we don't have 8,000 different reference implementations of an ERC-20; it seems like both of those could be implemented natively in Rust quite easily. Can I just say something regarding the native or built-in contract and non-wrapped, non-classic tokens? Basically, what we need to define anyway is the interface, and there is really no value in not implementing the non-wrapped tokens in Rust, because someone would need to do this anyway and there is literally no benefit of not doing so. The interface has to be implemented either way; it's not like we can have two contracts with two different interfaces, where one would be called classic- + +[52:00] only tokens and another one is... no, they both have to implement the same interface, so they will effectively be the same contract, no matter where exactly it has been implemented. So ideally what we should strive for is a standardized token interface that contract writers can reuse. And whether or not there will be another implementation: seriously, I don't see any value in not implementing the vanilla version that behaves in a well-defined way; it doesn't give us any more freedom than not implementing the current interface. Does the current standard interface we have fill this requirement, modulo import/export, which only makes sense for the classic tokens? Yeah, + +[53:00] but yeah, of course. Okay, so we are pretty close to being at time; are there any other big questions or topics people want to discuss? Yeah, George was mentioning something to me around how payments and transfers work, payments on Stellar, and something we haven't really discussed is: okay, if there is a single balance (or is this even relevant, okay, sorry), if this is a single balance and someone's doing a transfer, someone might assume that would show up as a payment operation on the Stellar side. So you might look on Horizon and expect to see a payment operation. My assumption has always been that these things wouldn't show up; + +[54:00] it would be much the same as how import/export works. Does anyone have any thoughts about how that would work? I think, and this came up in what Eric was saying, at the end of the day, if you have balances changing in Horizon, you need to provide an explanation for why that happens. Whether it shows up as a payment operation or as an effect, I think we can discuss, but it needs to show up somehow, or else balances are changing with no explanation. Correct, and I think in the past we discussed that the import and export operations would show up as credit and debit effects, and so there would be visibility in that sense. I guess, yeah, I'm just curious whether the fact that there wouldn't be a payment operation is going to be a really big deal or + +[55:00] not; it sounds like you're saying no. It shouldn't be a problem, because this is no different than when you have trading: somebody does a path payment or whatever, and then you have those little micro-payments happening in between; your balance changes, so it's just a standard effect, not a payment, just an effect. Yeah, I agree. Thank you.
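Picking up the standardized-token-interface point from a moment ago, a rough Rust sketch of the single shape that both a classic-asset-backed token and a vanilla Soroban token would implement; the names and signatures here are illustrative only, not the actual spec.

```rust
/// Illustrative identifier type; the real interface defines its own.
type Identifier = [u8; 32];

/// One interface that every token, classic-backed or pure Soroban, implements,
/// so contract writers can integrate against a single shape. Whether `balance`
/// is backed by a trustline or by contract storage is hidden behind it.
trait TokenInterface {
    fn balance(&self, id: Identifier) -> i128;
    fn transfer(&mut self, from: Identifier, to: Identifier, amount: i128);
    fn approve(&mut self, from: Identifier, spender: Identifier, amount: i128);
    fn allowance(&self, from: Identifier, spender: Identifier) -> i128;
    fn decimals(&self) -> u32;
    fn name(&self) -> String;
    fn symbol(&self) -> String;
}
```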
But yeah, to clarify, though: this would be only for classic tokens; we would not do this type of stuff for arbitrary tokens on the network by default, because that's the Horizon question Eric brought up earlier: you don't want to pull arbitrary tokens into Horizon, because then the data model becomes extremely complicated. Okay, I see that we're almost out of time. I just want to say, I know + +[56:00] there's someone with their hand raised, and I know there are a couple of questions in live chat, and we will try to get to those after this call. But since we're almost out of time, I'll give it back to Tomer. Thanks, Justin. So, I think all the responses have been positive toward single balances. I think this is something we need to try to prototype and/or put a CAP around. There are still open questions (the question around opt-in semantics is a big one), and it also depends on some feedback from more ecosystem members and partners, and on giving us some time to wrap our heads around it. But I think there is overwhelming support for single balances, and we are going to try to pursue this. Thank you all, and have a great rest of your day + +
diff --git a/meetings/2022-07-12.mdx b/meetings/2022-07-12.mdx new file mode 100644 index 0000000000..ce9d4c5d0a --- /dev/null +++ b/meetings/2022-07-12.mdx @@ -0,0 +1,133 @@ +--- +title: "Project Jump Cannon Q&A" +description: "A live Q&A covering the Jump Cannon roadmap, Soroban SDK progress, token standards, fees, and developer onboarding as Stellar prepares to launch smart contracts." +authors: + - alex-mootz + - john-rubisoff + - jonathan-jove + - justin-rice + - leigh-mcculloch + - tomer-weller +tags: [soroban, CAP-46-7] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This Q&A session provides a broad status update on Project Jump Cannon, answering community questions about timelines, tooling, and what smart contracts unlock for the Stellar ecosystem. The team outlines the expected rollout sequence—from local sandbox development to testnet and mainnet—and shares what kinds of applications are already being explored by early partners. + +Much of the conversation focuses on developer experience. The speakers dive into the Rust-based Soroban SDK, local testing workflows, token contract design, and why Stellar chose WASM over EVM. The session also touches on longer-term topics like layer-2 systems, fee mechanics, and how classic Stellar assets will interoperate with smart contracts. + +### Key Topics + +- Roadmap and rollout: + - Experimental futurenet and local sandbox development as the first entry point. + - Public testnet planned ahead of mainnet, with timing dependent on validator readiness. + - Network naming still undecided. +- What smart contracts enable: + - Custom AMMs beyond Stellar’s native curves. + - Lending protocols, DAOs, and more expressive on-chain logic. + - Use cases not possible with the current fixed operation set. +- Soroban SDK status: + - Rust-based SDK with a rapidly evolving API. + - Local-first developer workflow with no network dependency. + - Upcoming documentation and examples. +- Testing and tooling: + - Native Rust test harness using the same runtime as Stellar Core. + - Ability to inspect ledger state and debug contracts locally. + - Emphasis on lowering friction for writing tests. +- Token contracts: + - Standard token contract inspired by ERC-20 with EIP-2612-style permit flows. + - Signature-based approvals for better UX and gasless-style interactions. + - Goal of strong interoperability with classic Stellar assets. +- Fees and CAP-55 (now CAP-46-7): + - Overview of the multi-dimensional fee model and computation metering. + - Ongoing debate around state expiration and long-term ledger growth. + - Acknowledgement that alternative approaches are still being evaluated. +- Architecture choices: + - Rationale for choosing WASM over EVM. + - Focus on parallelism, safety, and long-term scalability. + - Possibility of EVM-based rollups running on Stellar in the future. +- Ecosystem considerations: + - Anchors and issuers can continue operating with classic assets unchanged. + - Smart contracts may introduce explicit minting semantics. + - Open questions around NFTs and standardized representations. + +### Resources + +- [Project Jump Cannon: Choosing WASM](https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm) +- [CAP-0046-07: Soroban Fee Model](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-07.md) + +
+ Video Transcript + +[00:00] Hey everybody, welcome to the Q&A session for Jump Cannon. And, yeah, jeff neal already asked three questions, if we want to jump into those. His first question was: has the name been chosen yet? The second was: what is the approximate testnet month of launch? And then the same question for mainnet. I can try some of these. The name has not been selected yet; hopefully within a week or two we can answer that question. Generally speaking, the mainnet launch is scheduled for the end of the year, and it might slip into next year, because in the past we've learned that making a major release at the end of the year is probably not the best thing to do. So we expect end of December, beginning of January, for mainnet, and end of November, a month before that, + +[01:00] for testnet. And a couple of months before that, we're already slated to have an experimental futurenet, a dedicated test network to work with smart contracts on. And, to close the full loop, we also expect that by the end of this month you'll already be able to deploy contracts locally in a sandbox using the SDK. It's the future. Very cool. Let's get people to ask questions in voice; it's more interactive. Yeah, go ahead and submit a request to speak if you'd like to ask a question; we will pull you on up, put you in front of the class. While we wait for people to... oh, never mind, we've got a request. All right, life learner. Yes, thank you. What do you see, or do you have, any initial projects in the pipeline for the end of year, + +[02:00] when you do the release? Can you repeat the beginning of the question? I missed that. So let me try again: do you have any projects in the pipeline for the initial release? We're talking with several companies that are interested in building on Stellar. I can't disclose names right now, because nothing is finalized, but, you know, the usual suspects: AMMs and various lending protocols. Yeah, keep those requests coming. Tomer, do you think we're going to see DAOs being established through those smart contracts? I'm sure we'll see DAOs, yes. What is the first DAO you would like to see? I would like to see a DAO for a DAO-building platform. + +[03:00] So cool. Yeah, great answer. I'm stealing this from Jason, our CEO; he's also expressed an interest in the past in having a DAO. I mean, it's got a ring to it. John Jove, what's going on with the token contract? It's in progress. It exists, although I actually don't think I've posted anything publicly about it. I mean, Lee and I were just talking about it a second ago, but maybe I should actually post a link to the repo it lives in. But tell us a bit about the token contract. I mean, there's not a ton to say: it's a token; it looks a lot like an ERC-20 token, with the exception that everything is done in the style of EIP-2612 permits. So there are lots of signatures being passed around, which makes it really flexible. What is built + +[04:00] so far isn't actually compliant with the spec we published for it, but I'm going to work on getting it closer to spec in the next couple of days. So probably by, I don't want to say the end of this week, but by the end of next week, it'll be pretty close and pretty reasonable. It won't have all the functionality, but it'll be pretty close. I'm going to post a link to that in the Jump Cannon channel right
now, so give me a second. That's awesome. It works; I've tested it locally; it does seem to work. And can you share with the audience what the story is with EIP-2612, what it improves on over the ERC-20 contract? Yeah, that's a good question. So basically this EIP-2612, which I always refer to as EIP-2612 permit, because permit is the target function this whole thing was done for, was an improvement on the classic ERC-20 approve flow. At the beginning + +[05:00] of the whole ERC-20 experience, what you would do is: okay, I have this contract I want to interact with; I'm going to approve it to transfer balance from my account. So you submit a transaction on Ethereum, you wait for some confirmations, and eventually you're like, okay, this is final and we're happy, and you submit another transaction that actually lets the contract perform the action by itself. So basically you first paid gas for the first thing, and then you pay gas for the second thing. With EIP-2612 permit, you do this all as a single transaction by just passing a signature to the contract in the first place. It uses the signature on the permit function; the permit verifies the signature and then does the same stuff that approve would have done. So it's basically an ergonomics improvement, and kind of everything here works + +[06:00] like that, so you get that kind of ergonomics improvement everywhere. So would it be correct to say that EIP-2612 basically separates who's submitting the transaction from who's authorizing the moving of the funds? That's exactly right. Sorry, you want to go? Do you want me to go? You go. A lot of the conversation around EIP-2612 was around this gasless concept: basically, I shouldn't need to have an account that has money in it, or has Ether, to do stuff. And so these kinds of flows, me passing around a signature and letting somebody else act on it, make these gasless concepts much easier. Very cool; that's exactly what I was about to say. We have another question from the + +[07:00] audience. Sorry if I mispronounce your name, but jason godev asked: given that Stellar has some built-in operations that serve off-chain scripts and apps already, what use cases will smart contracts serve that are not possible with the current SDKs? The current protocol consists of around 20 high-level operations, which doesn't really allow you to express a lot of things. Say, for example, that I want to introduce an AMM built on a curve that's not supported by the native AMMs on Stellar, like StableSwap; or let's say I want a trust-minimized lending protocol, or a non-trivial-size DAO with stake-weighted voting, trust-minimized. All these things are not really doable today. + +[08:00] Great.
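A toy sketch of the permit idea described above, in plain Rust with a stubbed signature check and hypothetical names (it is not the actual contract's code); the point it illustrates is that the party submitting the call and the party authorizing the funds can be different.

```rust
use std::collections::HashMap;

// Hypothetical message the owner signs off-chain: "let `spender` move up to
// `amount` of my balance", bound to a nonce so it can't be replayed.
struct PermitMsg {
    owner: String,
    spender: String,
    amount: i128,
    nonce: u64,
}

struct Token {
    allowances: HashMap<(String, String), i128>,
    nonces: HashMap<String, u64>,
}

// Stub: a real contract would verify a cryptographic signature over the
// serialized message against the owner's public key.
fn verify_signature(msg: &PermitMsg, signature: &str) -> bool {
    signature == format!("signed-by-{}-nonce-{}", msg.owner, msg.nonce)
}

impl Token {
    /// `permit` can be submitted by anyone (the spender, a relayer) because
    /// authorization comes from the owner's signature, not from who sends it.
    fn permit(&mut self, msg: PermitMsg, signature: &str) -> Result<(), &'static str> {
        let expected_nonce = self.nonces.get(&msg.owner).copied().unwrap_or(0);
        if msg.nonce != expected_nonce {
            return Err("bad nonce");
        }
        if !verify_signature(&msg, signature) {
            return Err("bad signature");
        }
        self.nonces.insert(msg.owner.clone(), expected_nonce + 1);
        // Same effect as a classic `approve`, but done in one submission.
        self.allowances.insert((msg.owner, msg.spender), msg.amount);
        Ok(())
    }
}

fn main() {
    let mut token = Token { allowances: HashMap::new(), nonces: HashMap::new() };
    let msg = PermitMsg {
        owner: "alice".into(),
        spender: "amm-contract".into(),
        amount: 40,
        nonce: 0,
    };
    // A relayer submits Alice's signed permit; Alice never submits anything.
    token.permit(msg, "signed-by-alice-nonce-0").unwrap();
    assert_eq!(token.allowances[&("alice".to_string(), "amm-contract".to_string())], 40);
}
```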
If anyone from the audience has any questions, feel free to drop them into the live chat or the tech channel, or you can request to be on the stage and ask your question through voice. Maybe in the meantime Lee can give us a quick update on the status of the SDK. What should we be expecting from this July iteration, and what are you, Lee, most excited about? Yeah, we're still working on the SDK. John has proven that you can build something with it, which is fantastic. At this point the API is still relatively unstable; as we discussed just before this event, I'm adding things like iterators and slicing to vectors, and there are certain things about the API, events for example, that are probably going to change in the next few days to make them more ergonomic and safer to use. So we are really

[09:00] still... the SDK is really young and we're still making a lot of changes to it. By the time we come to the end of July, hopefully we'll have a lot of the building blocks, the primitives, there in a somewhat stable state. Even though the SDK is in flux, I do want to emphasize that it is quite sleek; the stuff that works is pretty cool and very easy to use, so I had a good experience working with it. Is the SDK something people can look at right now, today, or has it yet to be put out into the wild? The code is public; it's on GitHub. Just go to github.com/stellar and I'm sure you'll find it. The docs aren't up anywhere in particular just yet, so if you're looking for docs, that's not out yet, but we should have

[10:00] docs out with the July release at the end of July. And John's token contract is actually a really good starting point right now, because it uses the SDK and it's fairly readable, ergonomic, and well organized for someone who's maybe not familiar with the way Rust works. Tomer, if they do go look at John's token contract, what should they look at first? In any Rust project (I don't remember the lingo) you should look at the lib.rs file, because that's the entry point for any code that is not an executable. A general good guideline for looking at a Jump Cannon contract built with the SDK would be to go and find a function with the text "contractfn" directly above it. If you see that, you know that's an entry point to the contract, and then you can go and see what it does by following that through.
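As an aside, an entry point is, at bottom, just a function the host environment can look up by name and invoke. Here is a minimal plain-Rust sketch of that dispatch idea; the real SDK generates this wiring from attribute macros (the `contractfn` marker mentioned above), and none of these names are the actual API.

```rust
use std::collections::HashMap;

// Stand-in for contract storage; the real host keeps this in the ledger.
#[derive(Default)]
struct Ledger {
    storage: HashMap<String, i64>,
}

// The host's calling convention for an entry point, drastically simplified.
type EntryPoint = fn(&mut Ledger, &[i64]) -> i64;

// "contractfn"-style entry points: plain functions the host can dispatch to.
fn initialize(l: &mut Ledger, args: &[i64]) -> i64 {
    l.storage.insert("supply".into(), args[0]);
    0
}

fn supply(l: &mut Ledger, _args: &[i64]) -> i64 {
    *l.storage.get("supply").unwrap_or(&0)
}

fn main() {
    // The host's view of a contract: a name-to-function table. The SDK's
    // macros build the real equivalent of this table automatically.
    let mut exports: HashMap<&str, EntryPoint> = HashMap::new();
    exports.insert("initialize", initialize);
    exports.insert("supply", supply);

    let mut ledger = Ledger::default();
    exports["initialize"](&mut ledger, &[1_000_000]);
    println!("supply = {}", exports["supply"](&mut ledger, &[]));
}
```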
[11:00] In my particular case, right now, Tomer's advice is good: all of those functions are in lib.rs. But I actually have a refactor that changes that and puts them all off into a separate file; that approach would still let you find the right stuff. And I think the other thing to look out for is that you'll also see types that have an attribute like "contracttype" at the top of them. Those are all types that will end up either in the API or in storage. So if you're familiar with the Ethereum API, or with the types that get generated in other ecosystems, look for that contracttype tag: those are the types that are going to be interesting for you to look at. Lee, can you talk a bit about testing our contracts? I know that testing is a big issue in the world of Solidity smart contracts, and I know there are a lot of frameworks to facilitate it. What's my strategy as a person who's developing for Jump Cannon? Yeah, it's a great question. We really

[12:00] want testing to be really smooth, mostly because we want people to write tests; whenever there's a barrier to entry to writing tests, let's admit it, as engineers it's tempting not to write them. Tests for a Jump Cannon contract written in Rust will be tests that you can run locally on your computer, in your IDE. If you're using an IDE like VS Code, you'll be able to just run those tests like you would any other Rust test you're writing. In that test you'll have access to a host that acts very much like the runtime environment your contract will actually run on on chain, and the reason it acts very much like it is that it's actually backed by the same code: the runtime environment is written in Rust in Stellar Core, and we basically import that into the tests

[13:00] and run the contract within it. Much of the mechanics of that we're still working through, and John and others are really helping identify where the gaps are that we need to fill in to make it work as smoothly as possible for everybody. But ideally you'll be able to construct a couple of contracts, run them in your test, inspect the state of the ledger after running them, and write assertions like: I expect the balance of this account to change by this much, or the balance of this token for that account. It'll all be running locally on your system, which means you'll be able to step through, debug, and inspect the values of variables and things like that. Step-through debugging a smart contract: that's pretty wild.
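A toy version of that testing story, in plain Rust only (the real SDK's test host will differ, and these names are invented): the "host" is an ordinary struct, so a contract call can be exercised with `cargo test`, or straight from an IDE, with assertions written directly against the resulting ledger state, in the spirit described above.

```rust
use std::collections::HashMap;

// Stand-in host. Per the session, the real one is backed by the same Rust
// code that runs the contract environment inside Stellar Core.
#[derive(Default)]
struct Host {
    balances: HashMap<String, i64>, // stand-in for ledger state
}

// Contract logic under test: move `amount` between two balances.
fn pay(host: &mut Host, from: &str, to: &str, amount: i64) -> Result<(), String> {
    let from_bal = host.balances.get(from).copied().unwrap_or(0);
    if from_bal < amount {
        return Err("underfunded".into());
    }
    host.balances.insert(from.into(), from_bal - amount);
    *host.balances.entry(to.into()).or_insert(0) += amount;
    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn pay_moves_balance() {
        let mut host = Host::default();
        host.balances.insert("alice".into(), 100);

        pay(&mut host, "alice", "bob", 40).unwrap();

        // Inspect ledger state after the call and assert on balances,
        // exactly the workflow described in the session.
        assert_eq!(host.balances["alice"], 60);
        assert_eq!(host.balances["bob"], 40);
        assert!(pay(&mut host, "alice", "bob", 1_000).is_err());
    }
}
```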
[14:00] Okay, I see we have a bunch of people joining. We are talking about Jump Cannon and the kind of stuff we're going to see coming in the next few weeks, and we're happy to take questions. We have a question in the text chat from Matthias. He says: with smart contract support, I wonder if you see a possibility to build infrastructure like layer-2 apps, app chains, or side chains using Stellar as their layer 1. Yeah, definitely. We also had some folks looking at what type of crypto primitives we would need to do zk-rollups in an efficient manner. I don't think these are things we're going to see immediately, at launch this year or the beginning of next year, but with time we definitely expect more sophisticated L2 constructions to show up. These will probably take the form of

[15:00] zk-rollups, and the cool thing is that the zk-rollup can be something else from Jump Cannon: it can obviously be another instance of the same execution engine, but it can also be something like an EVM. So you could have an EVM rollup running on a Stellar base chain, which is pretty cool. What about you, same question that you asked Lee: what are you most excited about for the launch of smart contracts on Stellar? What would you most like to see? I really love the fact that the SDK builds on Rust, so the entire ecosystem of Rust tooling is at your disposal. You can do things like use Rust tooling for fuzzing, or the type of testing frameworks that Lee pointed out. And most of all, I'm just really excited to see what others build, and what the things are that we're not thinking about right now.

[16:00] In terms of getting other people to build, we're just starting to think about this, but if someone wants to get involved, what's the best way for them to start? Right now I would say that things are kind of all over the place. You can start reading CAPs, but like I said before, I think the best entry point right now is through John's contract, because it's the most comprehensive and up-to-date Jump Cannon contract; just learn from that. We are working on a set of docs that should be live by the end of this month, covering how this whole thing works, with some examples and tutorials on how to write a basic contract and use the SDK. It will really be focused on the local sandbox developer experience. Funny enough, you don't really need to know anything about Stellar in order to

[17:00] start developing for this. You don't even need to be talking to a Stellar network; all you need is the local sandbox that's in the SDK. I also have another question from Matthias: in terms of infrastructure, what would be your top, number-one priority for smart contract projects? He says he's thinking of Stellar mainstream adoption, time frame Q3/Q4 2022. So, infrastructure-wise, the number-one priority is to allow people to build smart contracts. I would say the composable DeFi lego blocks: things like AMMs, specifically the types of AMMs that we don't have natively in Stellar, things like StableSwap and Uniswap v3. I'm definitely interested in seeing things like Compound, and I would love to see some

[18:00] infrastructure to start building DAOs. No one on this call has asked this question, but definitely, when I start to talk to people about this project, the first question people have is: oh, is this EVM compatible? Can you give a brief highlight about the tech stack choices and why we made them? Yeah, sure. We actually put out a blog post a couple of months ago about why we decided to go with Wasm, and we outlined a few options that we considered but ended up not going with. The EVM is the big elephant in the room; obviously it's on a bunch of different chains, from Ethereum to Avalanche and others. But the bottom line was that we were looking for something that we can really scale the network with, and the EVM,

[19:00] while it has a lot of good things about it,
has some glaring issues that we didn't really want to ignore. One of them is that it just doesn't lend itself well to parallelism, which is very important for us moving forward, given the type of real-world scale that we're looking for. The other thing is that there are just a lot of footguns in the EVM, a lot of ways to unintentionally do the wrong thing. So we wanted to make sure we build something that is batteries-included. The token contract that John is building right now is going to be kind of the native, default way to build tokens on Jump Cannon, so you won't have to do things like deploy your own ERC-20 contract, which in the Ethereum ecosystem everyone just goes and copy-pastes:

[20:00] the OpenZeppelin ERC-20 contract, and a lot of other contracts as well. So there are a lot of small things where the EVM is not the friendliest for developers, and we want to make sure we're building something that's friendly for developers and built for the future. We have another question from Matthias: on EVM compatibility, you could just build an EVM-compatible layer using Jump Cannon smart contracts, like aurora.dev on NEAR; something to think about to bring over some Ethereum projects. I guess that's not really a question so much as a statement. It did end with a question mark, though. Yeah, in my defense. And I posted the Wasm blog post in the live chat channel. Awesome. I think Aurora is a really interesting example.

[21:00] Both NEAR and Avalanche recognized that the EVM is not really what they want to build on, but at that time, from their perspective, the way to get to market was to have something EVM compatible. Avalanche came up with the C-Chain, which is EVM compatible; NEAR came up with Aurora. These are fine projects, and they really show that the developers of the chains themselves don't necessarily think the EVM is the way to go, but they put something in. Aurora is a complete EVM that runs as a smart contract, which is a very interesting approach. It's pretty wasteful, both in terms of time and space, and it also somewhat ruins the benefits that you get from NEAR, because now you

[22:00] don't actually get all the sharding and the parallelism that NEAR offers; you don't get that in Aurora, and if you look at the actual TPS numbers, they are extremely low. So there are pros and cons to every approach. All right, Moots has joined the chat. You have a question? Yeah, I was just hoping to ask about an update toward CAP-55, I believe was the number. I think the last Stellar dev chat we had was on some of the fee mechanics, specifically the underlying state expiration, and I know that was a few weeks ago, and with July 4th and all I wasn't paying close attention. So I was wondering if there were any updates you could provide; I know it might not be fair since Nico's not here, but just kind of regarding that. No, that's great: Nico's not here, so we can talk about [CAP-55](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-07.md). I would say that

[23:00] there are some internal efforts to come up with opposing CAPs.
So I think that with [CAP-55](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-07.md) in its current form, we all acknowledge that it's a huge change, different from anything out there, and we want to make sure that we consider all the alternatives before taking such a step. The CAP Moots is talking about is [CAP-55](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-07.md); it's around fees. Most of it is around gas and metering, or metering computation, and state allocations; that is the less controversial part. [CAP-55](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-07.md) also presents this idea of state expiration, which is that state can expire after a specific time if it's not used or not touched. That's great in

[24:00] terms of making sure that the ledger state size stays manageable, but it introduces an additional level of complexity, for both contract developers and users, to make sure that important state doesn't just disappear into the abyss. On that topic, I highly recommend the talk given by the Geth lead developer last month in Prague about the kind of time bomb that's in basically any EVM chain out there: state is growing at a rate where commodity hardware won't be able to handle it soon enough, and a lot of the EVM chains that are just cranking up their TPS numbers are going to hit that wall a lot sooner.

[25:00] So we want to make sure that we're cognizant of this issue and have some strategy to mitigate it. Cool, thanks for the update.
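For intuition only (this is not CAP-55's actual mechanism, and the constant and names below are made up), here is a toy model of the state-expiration idea: every entry carries a "live until" ledger number, touching an entry extends it, and a sweep evicts anything whose window has passed, which is what keeps ledger state from growing without bound.

```rust
use std::collections::HashMap;

const LIFETIME: u64 = 100; // ledgers an untouched entry survives (invented)

struct ExpiringState {
    entries: HashMap<String, (i64, u64)>, // key -> (value, live_until_ledger)
}

impl ExpiringState {
    // Writing an entry (re)extends its lifetime.
    fn put(&mut self, now: u64, key: &str, value: i64) {
        self.entries.insert(key.into(), (value, now + LIFETIME));
    }

    fn get(&mut self, now: u64, key: &str) -> Option<i64> {
        match self.entries.get_mut(key) {
            Some((v, live_until)) if *live_until >= now => {
                *live_until = now + LIFETIME; // reads also count as "touched"
                Some(*v)
            }
            _ => None, // expired or absent: the state has "disappeared"
        }
    }

    // What an expiration sweep would do at each ledger close.
    fn evict(&mut self, now: u64) {
        self.entries.retain(|_, (_, live_until)| *live_until >= now);
    }
}

fn main() {
    let mut state = ExpiringState { entries: HashMap::new() };
    state.put(0, "balance", 42);
    assert_eq!(state.get(50, "balance"), Some(42)); // touched: lives to 150
    state.evict(200);
    assert_eq!(state.get(200, "balance"), None); // untouched past its window
}
```

The developer-facing complexity mentioned above falls out directly: anything important has to be touched, or it is gone.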
And we have a question from Jason C: how is the development of linking Stellar classic tokens and their potential smart contract equivalents looking? Will classic tokens and their smart wrapped versions be interoperable in the background, or will developers need to create their own wrapped versions that users will need to trade between? This one's for me, I can tell. So, this is a good question. The interoperability really comes from the contracts that other developers build. The native token contract that we're going to provide is going to have good interoperability between classic assets and

[26:00] smart assets. So basically, if you use that, and the contract that you build is compatible with that native contract, then you're good to go. But let's say that you like some other kind of token (on Ethereum there are more types of tokens than just ERC-20 tokens) and maybe that's not a good compatibility fit for you; then you don't get the interoperability guaranteed. But in the most basic case, where you have a classic Stellar asset that's a fungible asset, an Argentinian peso for example, you should expect to have excellent interoperability with smart contracts, assuming the native contract we're providing is widely adopted. Same question regarding NFTs. Sorry, I think I just cut you off, John, but I think you were just going to read that question out, right? Yeah, I was going to do that; I was also going to ask if Jason was the CEO.

[27:00] And then, on that question with regard to NFTs: Tomer has asked me the same thing, about what we should be doing for NFT interoperability against classic NFTs, and I basically told Tomer I haven't thought about it and I'll think about it in August. So I don't know the answers to that question yet; Tomer might have some insights in that regard, in terms of what he thinks should happen, so I'll turn it over to Tomer here. Yeah, I think that's a very good question that we need to figure out. I don't think Fred is in the audience right now, but I would love to get Fred's take on whether we need to have an opinionated, default NFT contract. The type of NFTs that you can issue today, on Litemint for example, are still interoperable with Jump Cannon, in the sense that any token from classic is interoperable

[28:00] through these wrapped tokens. The question is: do we want to introduce a native representation on the smart side, and what form should it take? Should it just be a regular token with zero decimal places? Those are open questions, and I think we need to survey NFT contracts in the ecosystem to better understand that. Like John said, that work is slated for next month. All right, Matthias has another question; I'm glad he labeled it as a question this time. For me: are there any current Stellar education partners, such as Stellar Quest or the like, working on training and/or education in preparation for Jump Cannon's launch, for smooth onboarding of devs? No, not yet, but that's definitely planned. Like I said, right now the docs and the materials are just not in a place

[29:00] where it's easy to onboard, but it's a very top priority for us to get there. I think maybe Seashells wanted to be in the recording, so: Seashells has wished us all a happy Thursday. We have Pantheon, who wants to speak; here you go. I don't know if you can answer this quickly or not; it's more of a security question. Do you see any security flaws with this platform, or any potential issues, particularly right now? With current security flaws, if someone gets hold of your secret key, your wallet's exposed. Would these smart contracts be in the same situation, where if someone has secret key access, the whole smart contract is now exposed? I would say that smart contracts

[30:00] allow contract developers to write arbitrary code that is deployed on chain. That's definitely more powerful than the current protocol, but it also definitely gives developers some more rope to hang themselves with. From the platform security perspective, we're planning multiple audits to ensure that the platform itself is safe. With that said, it's impossible to enforce safety at the protocol level, and it will be up to developers to ensure that the contracts they write are safe. We are going to publish a lot of information on how to maintain security, and we are also planning a grants program that will include

[31:00] grants for auditing. But at the end of the day, how should I put it, shitty code will exist and it will be unsafe; you can never stop that. Yeah, I just wanted to know the likelihood, in the system that's being created, the possibility of that happening.
But overall, thank you for answering that. We have one more question from the text channel: can you talk a little bit about anchors and Jump Cannon? Yeah. From the anchors' perspective, they don't actually need to change anything about their offerings, because the type of assets they're dealing with are classic assets, so nothing really changes about the on- and off-ramp. They could, if they choose to,

[32:00] on-ramp directly to the smart side, or allow off-ramping through it, but we expect wallets to be able to seamlessly move assets between these two sides, so that is not necessarily a required thing for issuers. Even issuers of classic assets could, with their existing issuer authorization, move to minting over on the smart side for the exact same asset. One advantage of that: we hear a lot from issuers that Stellar's model of implicit minting through payments is sometimes a bit idiosyncratic compared to how they do things on other networks, and the nice thing about the standard token contract on the smart

[33:00] side is that there's an explicit mint operation, which makes things a bit more palatable. So maybe we'll see issuers move to minting on the smart side, but I'm not sure. I think we passed the 35-minute mark, and if there are no other questions, maybe we can call it a day. If you do have other questions for us, come find us in the Jump Cannon channel; you can probably tell that we're in the dev channel all the time, but talk to us in the normal one and we'll definitely respond to you.
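A closing footnote on the minting point above, as a toy contrast only (all names are invented; this is not the standard token contract's API): on classic Stellar, supply appears implicitly when the issuer account sends a payment, while the smart-side token described here would expose an explicit, admin-gated mint entry point.

```rust
use std::collections::HashMap;

#[derive(Default)]
struct Balances(HashMap<String, i64>);

// Classic-style: supply appears as a side effect of a payment. A payment
// *from* the issuer account implicitly mints new units of the asset.
fn classic_payment(b: &mut Balances, from: &str, to: &str, amount: i64, issuer: &str) {
    if from != issuer {
        *b.0.get_mut(from).expect("no balance") -= amount;
    }
    *b.0.entry(to.into()).or_insert(0) += amount;
}

// Smart-side style: an explicit, admin-gated mint operation.
fn mint(b: &mut Balances, admin: &str, caller: &str, to: &str, amount: i64) {
    assert_eq!(caller, admin, "only the admin may mint");
    *b.0.entry(to.into()).or_insert(0) += amount;
}

fn main() {
    let mut classic = Balances::default();
    classic_payment(&mut classic, "ISSUER", "alice", 100, "ISSUER"); // implicit mint
    let mut smart = Balances::default();
    mint(&mut smart, "admin", "admin", "alice", 100); // explicit mint
    assert_eq!(classic.0["alice"], smart.0["alice"]);
}
```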
diff --git a/meetings/2022-08-31.mdx b/meetings/2022-08-31.mdx new file mode 100644 index 0000000000..0970208dca --- /dev/null +++ b/meetings/2022-08-31.mdx @@ -0,0 +1,165 @@ +--- +title: "Stellar Ecosystem Panel: How to Succeed in the Stellar Community Fund" +description: "A community panel with past Stellar Community Fund winners sharing practical advice on crafting strong submissions, engaging the community, and using SCF as a launchpad for real-world Stellar projects." +authors: + - anke-liu + - diego-yanez + - nat-robinson + - olufunto-boroffice + - sam-sealey + - wouter-arkink +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This panel brings together founders from Alfred Pay, Beans App, Leaf Global Fintech, Stellar Global, and Quidroo to share firsthand lessons from participating in and winning the Stellar Community Fund (SCF). Speakers reflect on how SCF goes beyond funding by offering community feedback, visibility, and validation that can accelerate early-stage ideas into production-ready products. + +The discussion emphasizes what makes an SCF submission successful: clear problem framing, early user validation, thoughtful budgeting, and authentic community engagement. Panelists also share candid insights about iteration, failed attempts, bootcamps, and why persistence and openness to feedback matter more than a “perfect” first submission. + +### Key Topics + +- What the Stellar Community Fund is and how it supports builders through grants, feedback, and community voting +- Lessons learned from past SCF winners across wallets, anchors, payments, and SME finance +- Why early community engagement (Discord, social media, direct feedback) improves submissions +- The value of starting small with funding requests and scaling in later rounds +- Importance of clear problem statements, simple MVPs, and showing real user traction +- Design, demos, and presentation tips that help projects stand out during review +- Using SCF as a stepping stone to accelerators, investors, and broader ecosystem growth +- Advice for first-time applicants, non-technical founders, and repeat submitters + +### Resources + +- [Stellar Community Fund](https://communityfund.stellar.org) +- [Stellar Community Fund Handbook](https://communityfund.stellar.org/handbook) +- [Stellar Network Overview](https://www.stellar.org/learn/intro-to-stellar) +- [Stellar Grants and Ecosystem Programs](https://stellar.org/grants-and-funding) + +
+ Video Transcript

[00:00] It is 1:02, so hello everyone, welcome to today's panel. My name is Anke and I'm a program manager on the ecosystem team at the Stellar Development Foundation, a nonprofit organization founded in 2014 to support the development and growth of the open-source Stellar network. So welcome everyone. Today we are having a panel about how to succeed in the Stellar Community Fund. We will talk about the Stellar Community Fund quite a bit, and we will also refer to it by its abbreviation, SCF, so you'll be hearing that a lot. SCF stands for Stellar Community Fund and SDF stands for Stellar Development Foundation, so be sure to take note of that. But what is SCF? The Stellar Community Fund is an open-application grant program hosted by SDF

[01:00] that serves as the springboard for many businesses and developers building on Stellar to turn their concepts into tangible products and services. It's also a program where SDF provides the funding, but the community actually decides where the funding goes. Together with its predecessor, the Stellar Build Challenge, it has been around since 2016, which is quite ancient in the blockchain industry, and it has distributed almost 200 million lumens to hundreds of Stellar-based projects based on community input, including familiar names like StellarExpert, Lobstr, Litemint, and Anclap, and a few names that you'll hear very soon. If you've ever looked at the grants space on `stellar.org`, you'll see that we actually have many grants, but SCF is really quite special, for three reasons: all kinds of use cases and project scope sizes are

[02:00] welcome; the community is actively involved in guiding and supporting the projects, as well as voting for projects they want to see earn a grant; and SCF grants are no strings attached, which means that grant recipients decide how to spend their grant and don't have to pay anything back. So it's quite special. How does SCF work? In short, it gives companies and builders an opportunity to present Stellar-based projects to the broader Stellar community for feedback, and gives the broader Stellar community an opportunity to give feedback and weigh in on which projects deserve lumen grants. We currently do this biannually, on a cadence, in the form of rounds. Each of these rounds is listed on `communityfund.stellar.org`; they have a set pool of lumens allocated for distribution to the winning projects of that round, and they consist of a few phases that you see here.

[03:00] Currently we're actually in the submission phase for the 11th edition of the Stellar Community Fund, which has a total pool of 8 million XLM to distribute. That means that any eligible developer, team, or company building on Stellar can submit now to the SCF and request funding up to 200K, depending on their project scope. Aside from funding, participating in the SCF allows you to gather useful feedback from the community, gain traction in the ecosystem, and open up unique opportunities for growth. We're also having a very exciting startup bootcamp if you submit before September 25th, so definitely look into this. But to be considered for participation, before your project reaches community eyes, it needs to pass the selection panel

[04:00] first, and this is no small feat: last round, less than a third of submissions were selected to move forward. To learn how to set your project up for success, stay on to hear from leaders of all-star projects, winners of previous SCF rounds (Alfred Pay, Beans app, Leaf Global Fintech, Stellar Global, and Quidroo), on how to set your project up for success. I talked a lot, but let's start with some introductions, and we'll start with Alfred Pay, so take it away. Anke, thank you so much for having us. Just a little bit of intro on us: at Alfred Pay, we participated in Stellar Community Fund number nine, and we were fortunate to be one of the winners there. A little bit about us and what we do: we're currently an anchor on the network that processes USDC payouts into the Dominican Republic. Our idea is to scale

[05:00] from there and also build the Dominican Republic stablecoin as we scale our solution there vertically. We also have the Alfred Pay wallet, which is a user-facing product, and we brand ourselves as a web 2.5 company, because we use blockchain as the underlying tech but provide a traditional payment experience: users are able to send USDC by connecting their bank account with Plaid, Apple Pay, or Visa and Mastercard. Then, very much like the MoneyGram integration, our goal is to start with that and provide as many outlets into local currency as possible. Currently we're part of the largest ATM network on the ground in the Dominican Republic, which allows you to withdraw at over 800 ATMs, USDC for local Dominican pesos, plus over 150 brick-and-mortar locations, aside from the ones that MoneyGram has, which I believe is an additional 600 there. And, last but not least, door-to-

[06:00] door cash delivery in under two hours in exchange for USDC, which is something super powerful. And, yeah, we're operational in the Dominican Republic, as I mentioned, and in quarter four of this year we're going to be launching our anchor in Haiti. Wow, that's really impressive. It's been a while since I last heard from you, and every time I hear from you it seems like you've made so many new developments and are really making real change for people, so this is really exciting. Great; thank you so much for being on board. Let's hear from Beans app next. Great, yeah, thank you. My name is Wouter, founder of Beans app. Beans took part in the last SCF round, number 10, and Beans is a non-custodial wallet that enables anyone to send money internationally at zero fees. The problem we saw, as many people

[07:00] building on Stellar know, is that international payments are slow and expensive; blockchain could solve that, but it's often very complicated for normal people. So what we've done is build a non-custodial app that moves all the crypto and blockchain things to the back end. As a user, that means you don't interact with private keys, you don't interact with trustlines or any of that. When you create your wallet, you choose your favorite currency, for example dollars, and then if you want to send money to someone in, I don't know, Argentina, who wants to receive pesos, all you have to do is look them up. The app will know that they want to receive pesos and show you the right conversion, and all you have to do is type in how much you want to send; you don't even have to think about the currency they want to receive, and all that with zero fees. And the same works with requests: you can request something, and when someone pays, it will

[08:00] automatically convert into the right currency for them. So that's Beans: basically, a global Venmo is what we're trying to build here. Super excited to be part of this, so thanks. Yeah, I was also really excited when I heard that you, as a longtime community member, finally had a project, and I really like this one. It tackles a long-standing problem, but with really innovative thinking and great design for the solution, and I think that's what's really unique. So I'm very excited. All right, we'll hear more from you, but now I want to hear from Funto of

[09:00] Quidroo. Funto, are you able to...? I think you need to unmute first. Okay, sorry about that. Hi everyone, Funto Boroffice, I'm out of Abuja, Nigeria. I'm excited to be here. We won in SCF round nine, and Quidroo is an online invoice factoring platform where transactions are streamlined and SMEs can get paid fast cash when they sell their invoices. How we're leveraging the Stellar platform, or the blockchain, is really to optimize payments and financial transactions, and for us particularly, we're optimizing for quick, transparent, and efficient invoice financing for SMEs. The funding we got in SCF round nine definitely helped us to build; we're still in build

[10:00] mode, but we've been able to make some inroads. We pivoted a bit, because we were focusing on SMEs before; right now we're trying to work with vendors directly, just because of some of the issues with repayments and things like that. But I'm excited to be here to answer any questions that anybody has. Awesome, yeah, it's exciting. I remember Quidroo also participated in a 2021 bootcamp with DFS Lab, and you were very early in development then, and seeing how much you've already grown is almost unprecedented for a startup; I think it's really great work. So, yeah, excited to hear more from your learnings as well. But we have two more introductions: Nat from Leaf Global Fintech, take it away. Great, thank you, and thank you all for having us. Leaf is a digital wallet that was designed initially for refugees

[11:00] and migrants: people who are fleeing conflict and carrying cash across borders but may have a mobile phone. We've designed our wallet to work for and help customers who have smartphones or non-smartphones; we use a technology called USSD that's very prominent throughout Africa and East Africa, where customers can load funds onto our wallet and cross those borders safely without carrying cash. We are live in Kenya, Uganda, and Rwanda, and looking to expand to more partners and more countries throughout Africa, but we've really designed Leaf to be a global solution. We spent a lot of time in Colombia, helping and talking to Venezuelan migrants coming across the border, and everyone faces the same challenge: carrying cash, having a phone, being unable to get a bank account, but really able to benefit from these services. I will say also that we launched an NFT art sales program for some of our refugee customers on Leaf, to help them create and mint NFTs on Litemint

[12:00] and then sell those to the global community, and then we would send those funds directly to their Leaf wallets. That's been a lot of fun and exciting to work on with Stellar. Yeah, thank you. And Leaf Global Fintech has also been around for quite a bit; I think it was the seventh round, right? It already feels like ages ago. Yeah, you're old-timers in this young industry. I think it's really great and really cool to see such an innovative use case that's making real impact. So, yeah, looking forward to hearing more from you. And, last but not least, Sam from Stellar Global, take it away. Hey, Anke, hey everybody, great to see all these faces again. Yeah, so I'm Sam, and I founded a community called Stellar Global, actually back in 2019, so I've been doing it for a while. Really it was

[13:00] kind of born out of the notion that I always looked at Stellar as this incredible opportunity to foster entrepreneurs and developers and really help to build economic prosperity and opportunities for a lot of people. And so, yeah, Stellar Global has been great. We entered into the fund actually just last year, so I think this was... what, the 10th one? Was it the 10th, 11th? I don't even know what number we were now. No, you were in the ninth. All right. Yeah, so it's been great. It's really allowed the ability to do a lot more this past year: having a chance to speak at Davos and be a voice, also heading to Washington, DC, where we helped to advocate for Black and Latino developers and entrepreneurs that are building on chain, as well as several other things, like doing podcasts, videos, and engagement. So, yeah, we could

[14:00] talk a lot more in detail later on, but I'm really glad to be here. Awesome. Well, thank you, we're honored that you're here. Sam is definitely the most active community member that we have, and even much more than that. You're also part of Litemint now, right? Yeah, another SCF winner as well. I got to meet Fred back in 2019, and it's been great; I'm a strategic advisor for Litemint and we're doing some great things, so I'm really happy. Shout-out to Fred. Awesome, yes, shout-out to Fred if he's watching, hopefully. Well, that really leads us into the discussion. If you're here in the audience on Zoom, you have the chance as an audience member to ask questions of any of these panelists, and I will be taking a look at the questions in the Q&A section here. So definitely add your questions, and I will ask them if

[15:00] they're relevant to the discussion. So let me stop sharing my screen and let's start in panel mode. Let's go into one of the first questions. For about 15 minutes, I really would like to know a little bit more about your experience coming into the SCF and how you've grown since then. This can be about any phase, maybe the submission, or the community discussion: what was your experience in the SCF? Anyone, feel free to chip in; otherwise I'm going to assign someone. All right, Sam, since you have your microphone unmuted. Yeah, you know, for me it was great. I started off, like I said, as a community member, and I typically looked at the SCF

[16:00] strictly from a business angle: okay, look at the businesses that are joining in. But for myself, as Anke mentioned, I was spending a lot of time just doing things, and what pushed us to enter the SCF was really encouragement from the community. They're like, hey, look, there's a lot more you could do; we're spending a lot of time involved in this; you definitely should reach out to it. So getting involved was really great, going through the process, and it's really allowed for larger exposure, not just for Stellar Global but for the entire Stellar ecosystem as a whole. I got to do a lot more traveling and really put, I guess, the message and the mission of what Stellar is all about on the road, and I think it helped to make a lot more impact. And then also, having those funds allowed me to free up my time; I wasn't working throughout the year and could focus solely on this, and so it allowed

[17:00] for a lot more Twitter Spaces, a lot more engagements, speaking with leaders all across the world. So yeah, I think it's helped there, and we've seen that sort of impact within the community members. I'm really happy with the way Stellar Global itself has grown out of it. We've seen other businesses, many here on this call, that have used and leveraged the community. I know I've interviewed, say, Leaf Global; I've been down to Miami to see what Diego was doing firsthand and really support Alfred Pay. So I think it really opened up those dynamics. And then we're seeing other folks doing things as well: you see, for instance, the impact that individual members have put out throughout the community, and we see the SDF leveraging the Stellar Global community directly as well. Their community manager is in

[18:00] there often, and we see members of the engineering team leveraging the Stellar Global Discord too. So I think it's really helped to create a foundation for the culture of the community, and hopefully that can spread into other endeavors. But, yeah, I'll pause there. Awesome. Yeah, I really enjoy being in there. Everyone's also so positive, right? There are so many communities out there on the internet, and you kind of have to tread carefully, because there are scams left and right, especially in the crypto space. But in Stellar Global, when I was in there, people are just so nice, and that is really good community management; fostering that culture is so important. So, yeah, definitely a shout-out, and a shout-out to some people that you mentioned: shout-out to John, I know he's in there all the time, and Spintax. Great. Well, speaking about community, Wouter,

[19:00] I know that you've also been in the community for a long time. How did you get started, and what was your experience in the SCF? What stood out? Yeah, this idea of wanting to pay in a non-custodial way, and being able to pay anyone, has been in my mind for quite a long time, since I traveled through East Asia and having to pay or transfer my money from euros into the local currency was very hard. I found Stellar and dived into that in 2017, and I was really inspired and hoping for some global Venmo to come up; I hadn't seen it, so I started it. And then, on the experience of the SCF: besides the funding and speeding up your development, what is super interesting is that you are forced to do a lot of things just by taking part. You get engagement from very

[20:00] engaged testers, people who want to review what you're doing, and they give you very valuable feedback, which is very important to us as well. So that's one. Another one is that we did this community pitch, right? You're forced to write down what your problem is and what product you're offering, which is good to think about. You write it down as part of your submission, which is good to do; you can easily fall into the trap of thinking, yeah, I've got it all in my mind, but it's good to write it down for someone else to read. And then, finally, you have a whole network of people seeing what you build, so people can reach out to you in a very early phase, and you create valuable connections from that. So in my experience, it's way more than just applying to get funding; it's also a process that gives you a lot of value. Yeah, that's also what I hear from a lot of people: I

[21:00] mean, they need funding, everyone needs funding to get started, to fund their development, to scale their project, but what people walk away with is really that experience. Let's say, Nat, Funto, or Diego: does that resonate with any of you? Feel free to chime in. Yeah, so for me, I was very fortunate and privileged to have attended the bootcamp before joining the community, and I think that was a game changer. Like you mentioned, we were very early stage when we applied, and that's one thing I would say to anybody who's listening: you don't have to have a project or something that is already established; it could be early stage and you can still apply. And I always say that I'm a non-technical female founder, so I literally learned a lot during the bootcamp, even just in terms of design thinking and all that; it was very great. And then, joining the community, I will say that,

[22:00] similar to what Wouter had said, there are people who will kick the tires on your idea and challenge you, and then you have to go back and look at what you're doing and say, oh, does this make sense? Or how do I solve this? How do I answer this? Because they're somewhat waiting for your response, right? So you go back to your team and you're like: okay, how do we solve this? This is a problem, and if these people are asking this question, it means other people are going to ask it too. So for me, the community was great that way. But, like I said, people sometimes might not apply because maybe they're worried or scared. If I could apply, and I'm still on this panel today talking, I'll encourage anybody who's building anything using blockchain to apply, because you never know. Again, there are people in the community who are willing to help, who are willing to ask questions, who are willing to give their five cents as needed. So you can't really go wrong; it's all like an incubation place as well, where you're building, but there's a lot of safety, to some degree, around it, where you're not just flailing by yourself all over the place.
So you can't really go wrong, right, so it all like an incubation place as well, where you're building, but there's a lot + +[23:00] Of safety to some degree around it where you're not just flailing by yourself all over the place. So, again, I would encourage everybody who's, you know, maybe, of two minds to definitely check it out and definitely apply. Awesome, oh, that's. I'm like really happy to hear that. Your experience as well, especially like you know, really like using it, as some like feedback, like a space, also like to develop, like it's not that you don't need to send in a, you know, completely perfect Pro product or because, well, there is nothing. Like you're always going to develop, yeah, and so like, let's say, like Diego, like I know that you've participated in SCF 9, so that was a bit more of the recent rounds where we had you know all these different like phases as well, like what did what jumped out for you and what was your experience? Our EXP experience in SCF was awesome. First and foremost, + +[24:00] Like I think it was fitting for. Sam to talk first, because one of our first images of seller- other than going on and reading a little bit about what Stellar does, what Sam platform through Stellar Global, so being able to dive further into Stellar Global and see some of the previous projects, what other projects on the ecosystem are building, it- really gave us. That had to start and also, we connected with Sam. I think that connecting with Sam is never a bad idea and I think that everybody here can vouch to that. But our experience for stf was great for us. We are really passionate about what we're building. Like water Comm said earlier, like the payment space today is extremely inefficient and through Stellar we're able to build that change. And one of the coolest things is that we're all. If you're applying for the SCF, the concept is that you're building a long term project, correct, like your project doesn't end right + +[25:00] After the Community Fund is over and what that means is that you're going to have to raise a lot of money, truthfully, but one of the cool things is that the audience here is receptive to seller- know what is knows the power of. So you get that validation from the community that is a able to fund your project and then catapult you. I mean, we've been extremely blessed post SCF and I think that this is also credit to, LEAP Global. We got into the Berkeley blockchain accelerator which, as I said, leap Global paved the way for us in that side, but all the things wouldn't have been possible without the validation of the community. So the community saw we were building, they appreciated us, they gave us that funding. It's giving us the ability to build and then it's open doors to accelerators, open networks to different investors- of folks that I look at and I'm like: how the heck did we get here in such short period amount of time, but it's + +[26:00] A cool thing about building. It's a cool thing about this community. As you mentioned, there's a lot of communities in crypto, but I think the cool thing about staler is that it's an open network. It enables inter interoperability, like beans is building something similar to what we're building, but the cool thing is that a beans user can send out for pay user. We can cash them out in our local country. We're an interoperable platform that really empowers entrepreneurs to collaborate together. 
And, yeah, and it really is most to credit to the SCF and what you and the team have built through here. So, thank you. Well, not only me, there's actually a whole group of verified members in the Stellar Community Fund that gives input and really, like, allows for that feedback, right, so shout out to our, the life force of the Stell Community Fund, our verified members. Sorry, all right, but yeah, no, that's really where that comes from. And yeah, I mean you mentioned how the that we got here. Well, I know how you got here + +[27:00] Through the stall Community Fund right and also through that, you did such hard work and you actually, you know, did a lot of things that set you up for success, and we're going to get that in that more detail later on. But first I wanted to hear a bit about from that, like before we dive into kind of the advice part. Nat, I know that Alfred PES mentioned you on the accelerator part as well. You started with the blockchain Berkeley accelerator, but how did you decide to go even into the Stellar Community Fund? How did you make that? Yeah, that, I guess. Yeah, switch know it's just to Echo everyone else. The community has been incredible, just fantastic, and growing every year too. I think you know when we first started, it was a little less well known with investors and other community members, but it's incredible now and especially allot of the work that Sam's been doing. He's been promoting us so much. It's been, you know, just unbelievable how much he's been over backwards to help us. So + +[28:00] We're very appreciative of that and you know Sam's out speaking to Davos and like promoting the work that Stellar's doing globally and that's only getting better. So it's really been an honor to be part of this community. I'd say Tori and I- Tori, co-founder of leaf- had a great time out in Meridian in Mexico and I think that was a really our first big opportunity just to see what the community looks like. And now that we're able to have more inperson events, I'd highly recommend anyone you know to attend those and a network and to see what's out there, what's building. And you know people are just supportive and helpful. It's a fantastic experience in community and so I, after leaving all those accelerators- we did a lot of accelerators- so people want advice on accelerators and bootcamps. We're happy to help, but brookley was a fantastic one and a lot of them don't come with funding. You know that's one of the downside. A lot of support. You know networks- we'll connect to investors- but when it comes to actually putting in capital, there's very few that do that and that's what was, I think, transformative for us with SCF is actually + +[29:00] Now leveraging some of that Capital to use and some of the early stage work that investors won't support and fund and it's all worked out for us as well. Awesome, yeah, no, that's great to hear. Yeah, because, like, you need both sides right, you need that support is like so important. But then you also need that early stage capital and sometimes it's difficult to get because investors like you actually like need to show a lot and that you've done a lot, and especially for early stage, like you know, you got to get started and SCF is like this: like you know, Kickstart funding, almost that no strings attached to don't need to pay back, you don't even need to work on your project anymore if you don't want to, we do encourage you and there is a guilt tripping if you don't do it. Just kidding from the community. 
But anyway, you know it's like this unique opportunity really to get funding and it's quite, yeah, you don't really see that very often, even in the blockchain ecosystem. So, well, great, it just great also to hear from your experiences. And now in the last like half + +[30:00] An hour or so, I really want to half well, the actually the audience really wants to know how they can improve their submission, as they're writing them right now for scf1 and this is very important. And so you've been through the process, you have worked so much on your startup and your company, right, and I really want to know now how you set yourself up for success in the SCF. So let's dive into to that, right, we start with here. I also have some audience questions and we can kind of a free flowing discussion here. But I think, like a lot of questions here actually go about the community engagement. Here, from Carlos, there's a question how to best engage with voting members of the community. So who wants to start? Yeah, I get that question a lot, you know. + +[31:00] I think that ultimately, you know, ultimately I feel that where people, let's say, make the biggest mistake is that you know well, number one: find something that you're passionate about. Right. I think sometimes people find you know business. You know they kind of look at the SCF as like as strictly from look I can create a job or money- the money side of it, right- And I just think that's the wrong way of looking at it, because you know, to really engage in the community, you have to be in the community. You know there's, you know, right now, in Stellar Global. There it's a different time zone for somebody, different right, you know. So it's like you know it might be 4 00 am to you, but it's 1 pm to somebody else- and etc. And so you really got to be engaged in the community. The only way you can be engaged in community is if a you're passionate about what you're trying to build. And to be passionate about something, you have + +[32:00] To really believe that you're solving a problem. You know, like I could, I can look at everybody here and can tell you that they've all been like actively involved in a wide range of conversations. You know we talked about liment. You know Fred is, yeah, he has liment, but then he's passionate about. You know, okay, like how is the code being built out? You know what's the know, what's, you know what's the structure are. Is the community, are these businesses? You know adhering to the centralization and some of the core components of the community. So you have to be, you know, really passionately and actively involved. And when you're involved in all these conversations and you know people are going to find you, people are gonna know who you are, they'll get to, they'll get comfortable with you. And so that way, when you do have an SCF entrance, it's like, okay, I know what you're doing, I understand you, you're actually trying to solve a problem. Now you can start building up that engagement. + +[33:00] So you know, that's my long winded answer for that. Yeah, I also. I wanted to add to that. I would. 
My advice would be to reach out early, simply because once the Community Fund is going on- let's say for the 40 days that it's going on- people are reaching out to the verified members consistently, and some of it may feel like spam, I guess, just to kind of say it bluntly, but if you're reaching out early and to different members at the end of the day, it means that you know, you're interested in connecting with these folks and the truth of the matter is that we've all been in the shoes and at least I can say it for myself- where somebody reached out to me and gave me a helping hand, like- and as I mentioned, Sam has been a great example of that- like we need the community, at the end of the day, + +[34:00] To believe in our projects and we're excited about what people are building, because the stronger that the Stellar network is, the stronger that our projects are. But my advice would be to reach out early to some of the verified members. If you see projects that you know correlate with some things that you're doing, reach out, ask advice and yeah, but I would, I think that the best strategy would be to reach out sooner rather than later, because once the Community Fund is going on, most of us are going through the projects themselves, reading them one by one. It's there's a good amount of projects and that might not be the greatest time to connect. So, yeah, just to Echo what Diego said, I think you definitely will get what you put into it. So you know, if you don't engage at all, it will reflect. And if you leave it too late, like he's saying, then you know you're not going to get the sort of engagement that you're looking for. So, engage sooner. And + +[35:00] For me, I think there are some people, and stand out when it comes to engagement, right, and even if you can just, you know, connect with those, the people that stand out right, who either give you feedback or you see them responding a lot to different people. You know I call those people the champions of the community right, because they're very engaged and so, even if you can either follow them or ask them questions, they to get a lot of reads as well. So I think you know having a strategy around that as well is also very important. Yeah, so I'm hearing here: like definitely like engage early and engage in multiple conversations, like you know, like don't like, it's not only like- Well, if you really want to maximize your chances- I think what Sam also mentions like, go in the actual like, go in Stellar developers, Discord, go in Stellar Global. Like participate in some of the overall like Stellar discussions too, if that like + +[36:00] Is relevant for your issue. Like I mean, in Fred's case, I know that he's been activate, active for NFTs and Stellar like so much he's in all these conversations, right, and so I think that's really exciting. Like you really get involved with the community and work on a problem that you're really passionate about. Further on, like on community engagement vouter- I know that this is another question, think that came up somewhere, but I believe you've been also really active on social media and I think that's one of the other things like not only like the Discord right, like all over. So, yeah, I would love to hear a bit more about that. Yeah, that's more my many years long enthusiasm about Stellar and its potential to change people's lives, basically so, and that gives you a lot of people that are enthusiastic about + +[37:00] The same thing. 
Right, and maybe to add to the last one: what is engagement? Maybe that was what I was thinking. I don't think engagement is just telling people what you're building and then getting some response. You're in a super early phase, so it's very interesting also to cut it into small pieces and get feedback from people on specific parts, so that the answer isn't very complex, and also you're not really trying to convince someone that you're building something cool, but just getting feedback, which is already valuable- and you can do that on social media as well. I think that's also a great way to get a lot of feedback. Basically, like, what is the engagement on the things you're building, and are people excited about what you're showing them? So I think it's a great way to connect with people. Yeah, I like that aspect a lot- not just, like, hey, you don't need to convince people that your product is the greatest of all time. You can actually listen, + +[38:00] Right, listen more and listen to their feedback- and, obviously, it has to be constructive feedback, right; in the communities it can go all over the place, but if it's constructive, listen and implement and follow up. I think that's really important. And then, with community members, you build your track record with them. That's very important. Yeah, I wanted to add on to what Wouter just said and bring back up Fred as well. You know, Fred was, I think, disqualified from his first entrance into the SCF, and then, his first time going through after that, I want to say he lost the SCF before finally winning. So this is somebody who has obviously been really involved. But, you know, the key is being humble, right- the fact that he was humble, he kept on going. Back to what was mentioned earlier: he took feedback, came back and reiterated: + +[39:00] Okay, let me try to explain it in this way. Wouter, somebody else who's been highly involved in the community- you know, back in the day he had, like, the Stellar torch and was really influential in the early days as far as helping to build up the whole aspect of community. But, you know, shout out to him: when he was starting off his project, he didn't come in with an arrogance of, like, hey, this is the best product there is. He came in exactly how we were saying it: he was asking for a lot of feedback. You know, here's a screenshot- what do you think of this? So it was this active engagement from somebody who, I would say, understands the network very well, has a long history within the community, but he still came out and did the work, and that goes a long way. And so, yeah, just want to reiterate the importance of just being humble and really engaging and getting feedback from people and being genuine about it. Yeah, + +[40:00] I think that's really how you build that track record. I think it's not just a one-stop shop, right. You can actually also submit multiple times to the SCF. Like, you can start with a small budget and kind of get your foot in the door, ask for lots of feedback, even if your project is not quite finished yet, and then next time you can ask for more and kind of show that you've implemented the feedback. So, yeah, especially with Fred, like, with Litemint, it's quite interesting to see the many times they submitted, didn't win, but submitted again anyway.
Right, and not only does that show a track record for the community, it also shows it for, you know, SDF. Whenever we have grants, right: oh, this project really wants to be part of the Stellar ecosystem. It's like a qualifier, almost. So, definitely, I think that is really great- just keep going, ask for feedback and stay humble. Great advice. Nat, I see that you have + +[41:00] Your mic unmuted. I wanted to hear more from you about what your advice is, just quickly. No, I think it's all been said well, and, yeah, I think, no matter what you're building, somebody's already, you know, done something similar or solved a lot of your problems, and I think we found a lot of support and resources, certainly for the wallet business- there's a lot of those in Stellar. So, whether it's groups like Lobstr or one called Rehive in South Africa that we were able to connect with early on to get advice and also to share back- you know, I think we were one of the few that had incorporated USSD technology and were able to kind of share sort of best practices, what's worked and what hasn't, or whether there's other on- and off-ramps that we've had more success with- I think there's, you know, plenty to give back too. But I also feel like this is maybe an area that we could have done more with. I think the challenge that we had with our wallet is that not a lot of our users are, you know, online on the internet. So it was tough to get, at least, you know, feedback from anyone within the Stellar + +[42:00] Community about our wallet, because they couldn't download and use it, and I think that hurt us a little bit compared to some others that could say: look, this is the feedback, this is how we've engaged the community, these are some of the things that we've incorporated. So I definitely think we could have done a better job earlier on engaging with the community. We've gotten so much support since then, but, yeah, that was definitely one thing I think could have strengthened our application. Yeah, I guess that also points out that there's such a wide variety in projects, and yeah, sometimes- like maybe with Beans, for example- you can really find your users in the community, but in other cases your, like, feedback is going to be a bit different. So, yeah, I mean- and Nat, you were also talking a bit about, you know, user validation, and you've been working on Leaf Global for a while. What is some other advice that you would give to new submitters to put in their submission? Sure- and this is, I think, something + +[43:00] That helped set us apart a little bit- and also with fundraising too- is just going from that idea, a handful of, you know, test users, or just something theoretical, to actually, like, moving money and getting real live users and processing transactions. I think that is a huge hurdle, and it's tough to get: the, you know, regulation, the licensing, the partners, being able to move funds, getting KYC right- like, there's a ton involved there. Going from zero to one is a big heavy lift. I think that it might have just been timing that we were lucky with, with our applications, that we finally got over some of those hurdles after like five or six false starts- you know, we'd get things going and then it would shut down, and then another partner would get shut down, and we'd start with another. So, just the startup life. But I think that is really key, to be able to articulate that.
I feel like that really helped us a bit- it's like, this is more than an idea. We had, you know, at the time, I think, five or six thousand customers that were processing, you know, 100,000-plus transactions. + +[44:00] Like, we actually had good data on the flow, and you could see it all on the Stellar explorer. So that was fun to be able to then show: we're actually opening accounts, people are using it, this is how the flows are. And so trying to get to that step as early as you can, I think, helps. Yes- user validation, numbers, data. Like, show data to convince; you can prove that your product works, right, especially on, I mean, Stellar, right? I mean, yeah, you can see it on StellarExpert- it's all recorded, you can't even fake it, right. So I think that's really exciting. So I think that's a really great lesson for people: you know, do your homework, get user validation early, show what you've done, prove your track record. I think, even if it's as little as just a few people- if a few people really like your project, and you know you're in an early stage, there's a big chance that a lot of people will, right. So, yeah, let's see + +[45:00] Who has anything to add to that. Like, numbers- how did you get to those numbers? What is some advice that you would give anyone here? Feel free to chip in- I see some people unmuting. I'll add to that, because I think that's something that we've actually been dealing with recently, and I love that point of getting from zero to one. One of the best pieces of advice that we got recently was that when your product launches, it should suck- and that's okay, because part of it is understanding what's not working, what is working and so on. So one of our biggest priorities as of late has been exiting beta and getting the product in the hands of people, to see what's actually working, what's not working, what users like, what they don't like. And for someone like me, who's into details- like, no, this has to + +[46:00] Be, like, two fonts smaller and it has to be in this corner instead of this one- it has brought some, like, perfection paralysis, I guess, to what we've been so focused and excited on building. But I'm just so excited to get this in the hands of people- like, we recently launched our anchor and are exiting beta October 1st- and if I hadn't gotten that advice, which is exactly what Nat is pointing at, we would probably still be so paralyzed in the process of getting everything right. And it's understanding that your product is never going to be perfect at this stage, and that's why we're here. You know that there's no better community to rely on for feedback than this community. So I just wanted to add those points to that. Yes, so you're hearing: your project should suck if you're submitting to the SCF. No, but I mean, obviously, I think that's a really great piece of feedback, and that's what I've been hearing too. Like, it doesn't need to be perfect the first time. Your MVP should be, like, super simple, super + +[47:00] Basic. It doesn't need to be perfect- don't over-engineer, and just get it in front of people. I think that's great advice. Let me see, we have a- anyone want to add to that before we go into the next section? Right, no. So, "the project should suck"- very good advice. I love that.
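Nat's point above- that traction on Stellar is publicly verifiable on the ledger- is easy to check for yourself. As a minimal, illustrative sketch (not something shown in the panel; the account ID is a placeholder you'd replace with any funded public account), recent payment activity can be pulled from Horizon's public REST API:

```ts
// Sketch: list recent payments for a public Stellar account via Horizon.
// The account ID below is a placeholder- substitute any funded account.
const HORIZON = "https://horizon.stellar.org";
const ACCOUNT = "G...PLACEHOLDER_ACCOUNT_ID";

async function recentPayments(account: string, limit = 10): Promise<void> {
  const res = await fetch(
    `${HORIZON}/accounts/${account}/payments?order=desc&limit=${limit}`,
  );
  if (!res.ok) throw new Error(`Horizon returned ${res.status}`);
  const body = await res.json();
  // Horizon wraps results in a HAL-style `_embedded.records` array.
  for (const p of body._embedded.records) {
    // Native XLM payments carry no asset_code field.
    console.log(p.created_at, p.type, p.amount ?? "", p.asset_code ?? "XLM");
  }
}

recentPayments(ACCOUNT).catch(console.error);
```

The same records back what block explorers like StellarExpert display, which is why on-chain transaction counts can't be faked in a submission.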
But obviously we're here to make it better, and it's always going to be, like, iterative, and you can even, as was mentioned here, partner with companies- like, make it better; there's new opportunities if you just get it out in front of people. We have a question here from John Woodard, and this goes a bit more into the demos and kind of, like, graphics and whatnot. So what sort of demos are expected, and what is some advice that some of the panelists would give around this? I + +[48:00] Mean- oh, go ahead, Wouter. Yeah, what I would say is that I don't know if there are any expectations- it depends on what you're building. But what I would say is: take care to look at design, no matter what you do. Whether it's your presentation or you're building a consumer-facing product, at least take a look at your design. If you don't have a designer on your team, find someone that can help you, even if you do it through, I don't know, Fiverr or something. Make sure that it stands out, it looks good- but also use it to structure what you're building, because if you go through a design process, you will run into challenges. It will improve your product, but it will also make it easier to explain to people why what you're building is unique. So I would say, no matter what you do and what you show, take care to take a look at design as well. Make it look good. + +[49:00] Yeah, making it look good is very important. I think a lot of people- including me; like, we verified members read a lot of submissions- and it's nice to actually see some visual assets. And I know, Beans, you did that really well- Beans actually had a really great demo. Unfortunately, my Discord recording skills weren't as good, so I cannot reproduce it, but definitely for the next candidate pitches we should be all set on that. Let me see- like, I actually, you know, the design was really good on all of these- the project panelists that are here- and definitely check them out on the project site, on the Stellar Community Fund website. Let me see- and I also really liked your designs as well. Did you do any? I was actually gonna mention exactly that. The cool thing is that you're able to go through some of the previous projects that are winners + +[50:00] And see what they put together. And that's something that we did for sure before we submitted our application: go through some of the previous winners, see what they had done. And, to that point, I think design is important. One of the things that we did- and ultimately, as we said, the whole concept with our projects is to exit out of the Community Fund stage and, you know, to grow them- so you're eventually going to have to build out a pitch deck, and a lot of the questions that come out of the Community Fund are questions that you need to have in your pitch deck. So what's the problem that you're going after? What's the solution? So maybe even uploading that slide from your pitch deck, to give it a graphic that maybe adds a little bit of a twist of what your brand is, what your product does, etc., might allow you to stick out versus other projects, because when there's so many applications that are just all text, it can be pretty hard + +[51:00] To go through so many, whereas maybe you see, like, wow, these people have a really cool brand, they have a really cool way of communicating this to others, and it's something that'll stick out.
So definitely go through the previous winners on the Community Fund site and see what they've done. As Anke said, Beans did a great job as well. So definitely go through them and take a look and use them for reference. Yeah, maybe to add one thing: the pitch which we did- that wasn't recorded- what really helped us is to pre-record our demo. So what we did is take a live demo, pre-record it and then talk over it during the pitch, so that you're not focused on clicking all the buttons and all the things that can go wrong during a live demo. So I think it helped a lot to pre-record it and then put it in the slides. Yeah, that was actually a legit presentation. + +[52:00] Like, everyone who was watching- like Tyler, like everyone on my team- was like, whoa, pre-recorded? Because everything obviously went well, right. You looked like you were doing it live, and we were really impressed. But yeah, you know, those are some great tactics too. Let me see, we have a few minutes here, and I want to go on to some other questions as well. So another question I get often is: what's the best strategy for the grant amount? And I know that, you know, we've actually changed budget guidelines quite a bit throughout the SCF rounds. Right now the maximum amount is 200k, but we have different budget tiers depending on the project scope. So we have under 10,000 for small-scope projects, under 50 + +[53:00] Thousand worth of XLM for kind of medium-scope projects that are really for new projects, but also we have under 200,000, which is really for existing companies that want to scale a new product or kickstart a new development on that front. So, yeah, here it says: what is the best strategy to get into the Stellar Community Fund? And I know that you haven't had any previous experience in the Stellar community, and you chose a budget a little bit on the lower end. Can you tell us a bit more about your strategy there? Sure. So, I mean, when we applied, we just needed- or were looking for- funding to even just build, right. So we were looking for funding for developers, for maybe a bit of a marketing campaign, and so, I think, we did less than 50,000, if I remember correctly. + +[54:00] Yes, and, you know, based on advice as well, you know, just looking at what other people had done. And, I mean, first of all, not to just deviate, but can I just brag on Anke? Because I'm telling you, look, if she's in your corner, you are made, right. So let me just quickly brag on her and just say that everything she said we should do, we did, right. But there was a lot of advice around, you know, what to do and everything, and she has been such a phenomenal supporter of our product, what we're trying to build, the vision and everything. So, just going back to answer your question: again, we asked for a smaller amount and we actually got it. And I'll say, even now, we've been very miserly with even those funds, right, because we're able to use them to pay for developers.
We were able to do some marketing campaigns, like I said, and we still have some left that we're still, you know, managing, right, and so it's just been phenomenal + +[55:00] With just what we've been trying to do. And so, anybody who's coming in: unless you already have an established product or something that already has a lot of traction, I would say start small. And, you know, the beauty of the SCF is that you don't just get it one time and then you're shut out. You know, you can always come back. Like everybody has said, once you're able to prove the concept and, you know, get more traction, you can come back and ask for maybe a higher amount. So don't feel like you have to ask for the moon the first go-around. You know, ask for what you need, but make it manageable enough that the community would be able to say: okay, yeah, this makes sense. Yeah, I think that's great advice- also to really get your foot in the door and get traction, and just build that, and you can always submit again. Right, all right. Well, time flies- it's already almost done. Anyone here: any last advice for SCF#11 submitters? + +[56:00] Feel free to unmute. Yeah, just have fun, you know. I mean, there's a lot of great opportunities out there. I mean, since all of us have entered, there have been continual upgrades, continuous new countries that have been really expanding their use cases, and so just be creative, have fun and do it. Yeah, I think that's a great point. Don't only do it to win the funding, but just have fun, get feedback, interact with people, improve- use it to write down whatever you're building, validate your idea. So I think that's very valuable already. So, yeah, great point. Yeah, I would also chime in and say: don't leave it till the last + +[57:00] Minute to fill out your submission, right. Start working on it now, so that you can, you know, kick the tires around it, let people around you look at it, read it, give you feedback- put that in your submission, and then just make it as crisp and as great as it can potentially be. I was going to say something similar. I feel like the standards have got a lot higher since we went in, so up your game- the professionalism of the applications has gotten so much better. But, that being said, yeah, there's nothing to lose by just applying and getting some great feedback, and, especially if you're new to the community, jump right in- it's really a supportive one. And I'll add that if you don't win one time or two times, keep applying, because I think that a large part of being a founder is grit and being relentless. So the community loves to see people who apply two, three times, + +[58:00] And I think that has a lot of value. So, yeah, thank you so much, everyone, for that advice. It was really great to hear from all of you, and we really got into examples beyond the generic advice that we give all the time. Yes, it's true, our application has become a little bit more complicated, but that's a good exercise for you to write down your business. We're trying to streamline it every round- the SCF changes every round because we grow with the needs of the community and the growing Stellar ecosystem, which is really exciting. We're also having a, you know, sneak peek:
We're having some really exciting updates for SCF#12 that I'll be sharing in the next few weeks, so definitely stay tuned. But in the meantime, if you want to keep up to date with this and learn more about the Stellar Community Fund: we have an exciting website that we recently renewed- shout out to Charles from + +[59:00] Our design team; it's very great. It's at `communityfund.stellar.org`, so definitely check it out, and check out the projects of our panelists here and many more exciting projects that have submitted to SCF in the past. We, as mentioned here, have an exciting Stellar Community Fund Discord, where you can find many of our panelists- actually, all panelists here- and many more, and you can ask questions and whatnot. And we have recently also added an SCF handbook- so if you really want to know the details of how everything works, definitely check that out. You can find it on `communityfund.stellar.org`, and it will answer most, if not all, of your questions. And then we also post regular updates on our Stellar Community Medium, so definitely check all of those out. Thank you so much to everyone here participating. It was great to hear from you, + +[01:00:00] And I look forward to everyone's submissions to SCF#11- awesome. Well, thank you so much, everyone.
diff --git a/meetings/2022-09-16.mdx b/meetings/2022-09-16.mdx new file mode 100644 index 0000000000..2c074c6940 --- /dev/null +++ b/meetings/2022-09-16.mdx @@ -0,0 +1,138 @@ +--- +title: "Info Session for SCF#11 Startup Bootcamp" +description: "Overview of the SCF#11 Startup Bootcamp, including program structure, design sprint methodology, community involvement, and how teams can prepare to participate." +authors: [anke-liu, jake-kendall] +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +DFS Lab and the Stellar Development Foundation hosted an info session to walk prospective applicants through the SCF#11 Startup Bootcamp experience. The discussion focused on what teams can expect from the program, how the bootcamp supports projects building on Stellar, and how it complements the Stellar Community Fund application process. + +Speakers shared practical details about the design sprint–based format, the role of mentors and community members, and the types of outcomes teams typically achieve. Past participants also reflected on how the bootcamp helped sharpen product focus, validate assumptions, and accelerate progress. + +### Key Topics + +- Overview of DFS Lab and its hands-on approach to supporting early-stage startups +- Mission of the Stellar Development Foundation and the purpose of the Stellar network +- How the Stellar Community Fund supports builders through open, community-driven grants +- Structure of the Startup Bootcamp and its multi-day design sprint methodology +- Defining problems, mapping user journeys, ideating solutions, and rapid prototyping +- Importance of user testing and unbiased feedback during product development +- Role of Stellar engineers, mentors, and community members in supporting teams +- Community engagement, feedback loops, and visibility during the bootcamp +- Examples and testimonials from previous bootcamp participants + +### Resources + +- [Stellar Community Fund Overview](https://communityfund.stellar.org) +- [Design Sprint Methodology (Sprint Book)](https://www.thesprintbook.com) + +
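In the info session below, Anke describes Stellar's built-in primitives: accounts, asset issuance, transaction signing and balance tracking. For readers who want to see what that looks like in code, here is a minimal, illustrative payment sketch. It is not from the session itself: it assumes the classic JavaScript `stellar-sdk` package (export names vary across SDK versions), and the secret key is a placeholder.

```ts
// Sketch: build, sign and submit a simple XLM payment on the Stellar testnet.
// Assumes the classic JavaScript stellar-sdk; the secret key is a placeholder.
import {
  Server,
  Keypair,
  TransactionBuilder,
  Networks,
  Operation,
  Asset,
  BASE_FEE,
} from "stellar-sdk";

const server = new Server("https://horizon-testnet.stellar.org");
const source = Keypair.fromSecret("SB...PLACEHOLDER_SECRET");

async function sendPayment(destination: string, amount: string) {
  // Horizon returns the account with its current sequence number,
  // which the TransactionBuilder needs to build a valid transaction.
  const account = await server.loadAccount(source.publicKey());

  const tx = new TransactionBuilder(account, {
    fee: BASE_FEE,
    networkPassphrase: Networks.TESTNET,
  })
    .addOperation(
      Operation.payment({ destination, asset: Asset.native(), amount }),
    )
    .setTimeout(30)
    .build();

  tx.sign(source); // the built-in transaction signing described in the session
  return server.submitTransaction(tx); // Horizon relays it to the network
}
```

A few lines like these cover the account-loading, signing and submission flow the speakers refer to when they say anyone can build on the network without permission.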
+ Video Transcript + +[00:00] Hi everybody. Actually, can you allow me to present my or to share my screen here, or the host? I can help you share from my end. If that's okay, I think I can. Okay, I can share as well. Great welcome everybody. As gift said, we're going to introduce you to DFS Lab but, more importantly, the bootcamp and what to expect from the event. Coming up, and inca from Stellar is also going to say a few things about how bootcamp fits into their, the community fund, and also you know a little bit about what to expect from their side. So here's our + +[01:00] Agenda. We'll give you kind of a quick overview of SDF- and it's something fun, little bianca- and then I'll give you a bit more about the bootcamp. So just to kind of quickly give you an introduction to who we are at DFS Lab. We we're an early stage investor in the African tech scene. We invest very early, often at precede, in startups that are digitizing the African economy. So we invest in a lot of marketplaces, e-commerce, B2B e-commerce, social commerce, and everything along kind of the value chain of commerce, in Africa. And we have kind of a. We're a very research driven, thesis based investor and our belief is that, you know, while a lot of attention is being paid to fintech and some of the other areas that investors are excited about, we think the real prize is actually using some of those tools to digitize the economy, root and branch and go from kind of cache based and offline to digital based and online. + +[02:00] So you can tell. Because of that, belief, you can probably understand why you know, we're very aligned with Stellar, because I think they have kind of a similar belief around, you know, digitizing the economy and making finance seamless and online and all that. So, and we're, as an investor, we're very hands on we don't call ourselves an accelerator program because we don't do batches with kind of a fixed curriculum. We actually focus on the individual needs of each company. But we're very hands on we spend. We have, you know, a few folks dedicated full time basically to supporting our portfolio companies and we're basically there to help them usually go from the early precede phase to their first seed round. That's where we generally tend to intervene and we can't say too much about it yet. But + +[03:00] There's more coming in the partnership area between us and Stellar around. You know how we can help support more companies with more funding and things like that. So I can't say too much about it yet, but should be some exciting stuff coming up in the future. So that's us. We have team in Nairobi. That's gift, and also Joseph, who's there as well. But we also have people in Kenya and south Africa. I'm in paris, steven, other partners in san francisco, and we bring a whole host of different skills. I won't go into everything, but I think that gives you a little bit of a an intro to who we are as an organization. I could do you want to take over for a sec and give us a little bit more background on Stellar and the community fund? Sure thing, yeah, that makes a lot of sense. Yeah, so for everyone who doesn't know me- and it's neon desk. My name is ankur and I'm a program manager at the steli development foundation, or SDF, or short bar. We're going to say a + +[04:00] Lot of abbreviations today: SDF, scf- so just preparing you for that. 
And so SDF is a non-profit organization with a mission to create equitable access to the global financial system and as a non-profit we shepherd the development and growth of Stellar, which is a decentralized, open source layer, one blockchain network that connects the world's financial infrastructure. So Stellar is really a back end, so to speak, to power apps and services, where anyone can issue, store, transfer and trade all sorts of assets. Seller also has built in like account logic for creating accounts and assets, signing transactions and tracking balances. Overall, like Stellar is really purposely designed to streamline financial services and really unlock the world's economic potential by making money more fluid, markets more open and people more empowered. So there is like an exterior is an open source, + +[05:00] Which means that anyone can build whatever they desire on the network with no application or permission needed. And speaking of building on Stellar, I lead a program called the Stellar community fund, or scf for short, the second abbreviation- which is an open application, a grant program hosted by us that serves us the springboard for many businesses and developers building on Stellar to turn their concepts into tangible products and services based on input from the Stellar community. We've- actually it's quite an old fund, like we're together with our predecessor. It's been around since about like 2016 and has distributed almost 200 million XLM, which is Stellar's native currency, to hundreds of seller based projects based on community input. So that's pretty exciting. You know what's different? I know that there are many grants out there for + +[06:00] Blockchain. So what makes us different? Three reasons. So on, at scf, all kinds of use cases and project scope sizes are welcome and where other grants may primarily focus on innovation, on traditional like services, scf also funds like DeFi, NFTs, community education and developer tool projects. Also, the community is actively involved in guiding and supporting the projects, as well as voting for the projects they want to see earn a grant. Also fun fact, scf grants have no strings attached, so this means that grant recipients decide how to spend their grant and don't need to pay anything back. So pretty sweet, I would say so. Saf really operates biannually in form of rounds, and right now the scf is welcoming submissions for its 11th iteration. It's been 11 rounds already with a + +[07:00] Total excellent pool of 8 million. So any developer, team or company building on seller can submit to the scf and request funding up to about like 200 a thousand dollars worth of XLM, which depends on their project's code. Aside from funding, participating in scf allows you to gather useful feedback from the community, gain traction in the ecosystem and also opens up unique opportunities for growth. So, yeah, and for the first time ever, let's skip, let's go to the next slide, jake. Yeah, for the first time ever, submissions that get selected to participate in the scf and are eligible to participate in bootcamp can announce now participating- sorry, can now participate in a virtual startup bootcamp from November 1 to four. That will really like kick start their journey on + +[08:00] Stellar or even, if you're already in Stellar ecosystem, like help with a particular problem that you haven't been able to solve before, with the help of experts and facilitators from both SDF, DFS Lab and the community. 
So, yeah, over the course of a few days, participants will work with these facilitators to find a problem, storyboard a solution and develop a rapid prototype before getting feedback from the Stellar community fund, from the Stellar community, But jake will tell you much more about that. Just so you know before, if you want to get considered for participation, submit your style based project to the staff before September 25th, and this is a little over a week, so it's really coming up. So that is the deadline before September 25th also. I saw some questions in the, in the Q&A section here on the Zoom. So definitely add your + +[09:00] Questions there and we'll have a little a q, a session, like right after everything, like at the end of the session. Right, that's all from me and yeah, jake, take it away. Thank you okay. Yeah, that was great. So yeah, I'm gonna give you kind of an overview of what to expect if you come to this event. And we're really excited about it. We always. We've done a couple of these with Stellar already in the past and they're super fun for us because we get to see new companies who are, you know, often new to Stellar. They're trying to create, you know, some kind of an innovation or crack some difficult challenge in the financial services space, and it's a very kind of a intense interaction where we really get to go deep in what they're trying to build, what their challenges are, you know, etc. We + +[10:00] Get to meet their team and all, etc. We of thing, and so we really like that kind I think the seller team them, and for the same reasons. You know, if you just really get to dive in- and for us it's also great that we get to be hands on with each company, so we're not just kind of like lecturing about, you know, the future of fintech and all that kind of stuff. When you go to a normal conference, the bootcamp is really you and your team coming and doing work in a really intensive but structured way, with help from a whole bunch of different people- us, experts, Stellar, etc. So it's not- it's definitely not- something you want to think about like a typical conference or something like that. It's really you working on your product, which is, you know, which is what you should be doing if you're running a. So I'm going to start with kind of just high level. You know almost like the philosophy of a design sprint, right? So some of you probably done design sprints. You know we didn't invent them and we won't be the last to use them. But we + +[11:00] Have kind of our own version of the design sprint that we do and the real idea is that you test and prototype a solution in just five days. And you start with: you know a problem, you define, you know the challenge that you're trying to overcome, and then we have kind of a step by step process, once you've defined that problem, to go from there to a realistic prototype that you can put in front of users and get their reaction. And that's the critical thing about a sprint- the design sprints- is you're trying to avoid building an actual product, which sounds weird, right, like why are we teaching you or why are we encouraging you to avoid building product? Everyone else is telling you, yeah, build product, that's the most important thing. But what's important is actually to build product that people want, and this approach of a design sprint is a way to fast forward into the future and say, okay, how are people reacting to this product before we've actually built it? 
So we don't have + +[12:00] To waste the time building it if they don't react in the way we hope, and it's a great methodology. There's a few links here. You can read more about it. I encourage you to do that. But that's the fundamental philosophy of what we're trying to accomplish with this and you'll be surprised how realistic a prototype you can build. So here's the process. Now some of you have done, you know, sprints based on the sprint book which we had to link to, on the last slide or some of the other methodologies. This will look a little bit different, but it's okay. You know a lot of the same steps are there and the same fundamental outcome is there. So, because we kind of have limited time and we're planning to- we're assuming that there's going to be a large number of participants- we're going to try and have each participant team define the problem they're trying to solve before you show up. So that'll be important to know is that, you know, spend a little bit of time before we + +[13:00] Start on day one, actually figuring out and defining a problem. We're going to send you some exercises to do to make sure that you've actually done that, because it's actually kind of a common problem we see with product designers and founders where they'll say, oh, I know what the problem is, but they actually haven't thought it through in enough detail to really have a high resolution version of the problem. So that's what we're going to ask you to do before the event starts. Day one will then be to say, okay, let's take the problem that we have, map out what the sort of- you know, original customer journey is and think through where in that customer journey we might want what do you sort of current or standard- you know industry standard- customer journey, and then where in that customer journey or that sort of experience do we want to intervene with the prototype that we're creating today, right? So that's day one. It's really mapping out the problem in high level detail and figuring out + +[14:00] Where we want to intervene. Day two- and I'm going to show you some examples of each of these in the next few slides. so basically, you'll come up with a map and I'll show you the map in a second right. Day two, you're gonna focus on which solutions you know you wanna use in your intervention and you're gonna generate actually a whole bunch of solution ideas, you know. So you kind of widen the funnel of ideas before you then narrow down on which particular one you think is the best. And that process of widening and creating new ideas we have a whole bunch of- well, not a whole bunch, but we have a few steps and exercises to do to make sure that you really stimulate your creativity and bring in, you know, the best ideas from the team and make sure that you don't jump on the first idea that comes, you know, comes to mind, but you actually explore really deeply this set of ideas, space in your head that you know sort of the set of the possible, if you will. And then + +[15:00] We have a similar set of exercises where, okay, now all the team has kind of put all the ideas out there. Now we're going to narrow down using some anonymous voting and some ways of giving feedback that try to avoid biasing the solution based on who's the most convincing or who the feedback comes from. Often, you know, people think that you know, because the CEO said, oh, I like this one, then that's what we should do, and at the end of the day, the CEO probably has to make the decision. 
But we do think that it's better to make sure that everyone's kind of perspective and thought process is kind of treated equally in the initial part, so that you know you kind of narrow down the designs based on best thinking and not on who's, you know, the most senior or the most convincing. You know, some people just have a great way of putting it but that may not be the best idea. So day two is really going to be about focusing down on, okay, what's the sort of + +[16:00] Product approach and solution that we want to create. The next step after that- and that's kind of end of day two going into day three- is first, you're gonna storyboard your prototype. They're pretty high level of detail. Now, people always want to resist this. They, you know they found their idea on day two. They want to jump right in and build it, but we really push back on that. It's not a good idea, and the reason it's not is because, basically, when you jump in and try to build something, you think you know what you're building, because you've got this idea in your head, but that idea doesn't have enough detail to actually be a coherent feature on your product. And so what happens is you start building, you build, you know part a and part c and part f, and then you realize you gotta fill in the other stuff. And as you start to fill in the + +[17:00] Other stuff that you weren't thinking about, you know, when you just kick the thing off, you realize that I've got to redo part a because it doesn't fit together better. You know it didn't fit together well or doesn't make sense. Now that I've thought through part b and you start creating kind of a non linear problem to solve, where now everything is interrelated and you have to change this when they have to change this and slows you way down and you end up having to often sometimes just completely restart and do the whole thing from the beginning. Right, most of you probably experience that at some point when you're dealing with a complex system like a product, right? So what we say we encourage people to do- is spend the time. It only takes a, maybe two hours to really create a detailed storyboard that says okay, at each step. Here's what we're going to do. Right, there's going to be a button. That button is going to take you here. You know it's going to. You're going to do something. You're going to put in some data. You're going to. do, right, all that stuff will be thought out, including pretty high level detail on content, if there is + +[18:00] Any. So if there's wording, if there's text, if there's images, you can say, okay, we need an image here. We need to. You know we need some wording here. Then you go into the prototyping phase. You've got a. really detailed map, you know, that says, hey, you're going to do it's just like a recipe that you follow and you are going to be so much faster in building your prototype if you've got that recipe laid out or that storyboard. So, and then in the prototyping phase, that'll be day three and it'll actually kind of, you know, go beyond day three a little bit. You'll have a bit of extra time on day four as well, where you'll be prototyping. You will have support from us, but in particular, you have support from the Stellar team. There'll be some engineer folks who you know the apis and all the ins and outs of the, of all the different seller technology, so that + +[19:00] You know you won't get hung up, so that read through documentation and things like that. 
I mean their documentation is actually quite good, of course, but in any case they'll be folks who can help, you know, live in person as well. And there's gonna be the whole community and I'm gonna get into that in a second. This is just kind of the sprint aspect. There's gonna be a kind of a whole extra layer around all this, which is the community part of it. So, once you're done with your prototype, you're gonna get in. You're gonna then test it with users, and that's actually something new we're doing this time. If any of you were at the previous bootcamp, we didn't do that, but it's supposed to be a integral part of the sprint. It's very, it's actually the most important part. In fact, it's really why you do a sprint, a design sprint, in the first place, which is to, you know, find out what people think. And again, we're gonna have kind of an integrated process where people + +[20:00] From the community can be your user and give you feedback on how you know how the experience goes when they use your product or test the feature or what have you. And so you're, we're gonna do some workshops on how to do that testing in a way that doesn't bias your user. It's very easy to bias someone who's testing a product or a feature and they're kind of looking at you see, like you know how do you feel when they do x or y. They don't want to hurt your feelings if it's clear that you're the one who built this thing. You know they don't want to say it's terrible, or that they don't like it. Or they give you a bunch of really positive feedback but it's not. You know it's not real. It's not their real gut reaction. So, and there's a lot of other ways you can buy us. You know, by the way you present it, by the way you talk about it, by the way you know the situation you put the person in. So we've got, we're gonna. We have a workshop plan where we're gonna give you some tools + +[21:00] So that you know how to do some tools, test session with a prototype and how to collect the feedback and how to organize that feedback into sort of a useful output. So that's the basic sort of step by step process of the sprint itself and you know it, like I said, it's a lot of fun. It's very kind of both draining and energizing, because it's pretty intense. There's a lot of steps and you use in your brain a lot and you kind of dump out all your creativity and make decisions and then build stuff. It's pretty cool and what we see is that often within the within each company, not only does it help, you know really rapidly go from you know some particular problem you're having with your user base or your you know user experience or whatever it is, or you're trying to, you know improve, or if you're fundamentally new, maybe you're just trying + +[22:00] To build a very basic first version of the product. If you don't have one yet, it really you know it definitely helps do that really quickly and in a way that, like I said, is based on user feedback and so much more validated and much more likely to result in the final product that you built. You know being useful and liked by your users. But it's often also a way- because you spend, because you're so focused and intensively working on your product and you're doing it in the context of seeing a lot of other teams do their products, think through their challenges and solutions, etc. You're interacting with a lot of people who you don't interact with on a daily basis but who are, you know, experts in this area. 
You know our mentors and facilitators, us, the Stellar team, the community members. You're gonna have some. Really. I think most of the time we see teams having these really interesting kind of revelations about their strategy, their product, you know, their pathway to market, their distribution, all those other things that don't necessarily fit + +[23:00] Directly into the storyboard that you know or the product that you're prototyping, but that fit in around it. So it's a, it's a really around it. So it's a really valuable process, I think, and we see a lot of companies getting a lot of value out of it. I wanted to just kind of quickly give some examples just to illustrate what I was talking about. So remember I talked about first day- you draw out the pathway, the user pathway, and then you figure out where you want to focus and intervene. So this was a, this was one from a customer who was doing a cash in, cash out at a mobile money agent network in Kenya, and you can see kind of all the different process steps that are going on. You know they're entering their number, they're talking to the agent and I won't go through everything. And then the team wanted to forward on- sorry, wanted to focus on these two areas that are circled in green. So that was sort of the end of their first day, basically, of sprint. They came out. You know this + +[24:00] Is what they come up, know this other steps and other things they created, but you know this was kind of the main final output and it basically guides the creation of their solutions the following days to say, okay, we know the process that where we fit in and we know what we want to change. That's essentially what you come out of day one with and what we're going to do. We're still kind of fine tuning how we're going to have the interactions with the community members and things like that, but we are going to find a way that you can, we are present and show this to the community, probably via Discord, and either get feedback- people may vote on which kind of problem you know problem diagram and problem choice that they like the best, et cetera. So we're going to have some kind of like really cool ways where you can interact with the Stellar community and really get, you know, even more feedback and more ideas and + +[25:00] More sort of expert help with what you're doing. So this is the output of day two, or it's one example anyway, and what you can see here is we got this three pages. I think each one is supposed to be a looks like an app screen on a phone maybe, and you know, you're just kind of going through a sign- looks like a sign up process, right. There's a sign up button on the first page and then there's a- you know, looks like there's some marketing lingo on the second page. So it's kind of an onboarding process, right. The next one is maybe a text message based interface. So anyway, you know, I don't know exactly what this storyboard was supposed to be, but it was supposed to be an onboarding process for a savings app. And you can also see how there's these blue and pink dots. You're probably like: why are those all over the place? So that's part of one of the steps in the process is people on + +[26:00] The team and also other people outside of the team can vote on which things they like best and which things they like. You know that they think should go into the final version of the prototype and that's non bonding you know, it's kind of like a thumbs up on facebook or claps on medium. 
You know it's just kind of like people express their opinion and that gives you kind of this heat map of like: oh boy, there's a bunch of dots over here on the. You know, in some part we should probably keep that. And there's no dots in this, upright. No, people didn't seem to care about that part, so maybe we can cut it out, or something like that. And eventually you'll have a couple of these, because each, you know, there'll be more than one of these storyboards. Initially, as remember, I said we're going to first start with a bunch of ideas and then narrow it down. Eventually, the decider on the team will they make a choice and they may choose one storyboard or one sort of three page you know thing, or they might reconfigure and mix and + +[27:00] Match. You can do whatever you want, but the idea is to use this heat map and the feedback that you've got from the team to kind of choose. Okay, we're going to do this particular idea for an onboarding or for a transaction process or whatever you're doing, yeah, so that's the first thing, that, again, community is going to get involved. Here too, same as before. We're still kind of fine tuning how exactly that's going to work, but they're you're going to be able to kind of show and get feedback on some of this stuff and we're going to have, I think, some prizes and voting on, like you know, best ideas and things like that. So day three, I believe, will produce this storyboard. So this is an example of storyboard. I was talking about it's not super pretty, you can see, but it gets the job done. So essentially, these are supposed to be a series of app screens. You know you can see the wireframes and in a few cases they just photographed hand drawn + +[28:00] Diagrams and put them straight in there. That's totally acceptable and cool. Then they also put, you know, you can see- kind of the step by step with the yellow sticky notes, where each one kind of says: you know what's happening at each step and you get a little bit of the kind of the menu options and things like that, and you know, basically it's kind of like a transaction flow on some level. And so this is, you know, doesn't have to be that. You know that you don't have tons and tons of detail, as long as you know what's happening on each page and all the pages that you have to build and how they kind of interconnect to each other. You know that's what you need to get done, and once you have that then it's really easy. Then you just you know, then you're just heads down, you're building. The other thing we usually do is wire diagram for the transaction process that's happening on the back end and that's you know where you're gonna be interfacing with the different parts of the Stellar system and you're in your own. You know your own system + +[29:00] As well if you have one, and so this usually helps kind of clarify both the information and money flow. If you know if it's a financial transaction of some kind that's happening on the back end- not everyone needs to do one of these, but most people do. It's usually a valuable exercise, doesn't take very long. But again, same idea. It's basically you're trying to just map everything out so that you go to the building mode. You're not drawing the. You know the blueprint diagram as you build at the same time, right, you have the blueprint, then you build. 
Now I mentioned and alluded to some of this stuff earlier, but one of the things that's really cool about this bootcamp is- and we're really excited about anyway- is we're introducing a bunch of new features and things we haven't done before. And part of the idea was, you know, when Stellar came to us and said, hey, you know, we've done smaller bootcamps before, we have a + +[30:00] Lot of one on one stuff, but we really want to make it so that each company has a chance, so that a we can bring in a lot more companies and make it more available to the entire sort of community fund participants and folks who are applying, and we want to make it so that it interacts, so that all of them get to interact and the process is interactive with the entire community, which is super cool. It was a little bit daunting, I have to say I think it was like, hey guys, you know, go do that. And you're like, yeah, that's gonna be complicated, but actually it turned out. It was a great idea and we're really excited that we did it because now we come up with a whole kind of a new system that we think will do those two things a lot better and it's gonna be a lot of fun. So a few things to expect from this new approach. One is, as I've mentioned a couple times, there's gonna be help from community members to test ideas, get feedback, get advice, etc. And the community members are gonna be kind of like activated and online and available and kind of looking in, you know, on what's happening. We're, like + +[31:00] I said, we're still exact. We're, like that are still being developed. But one thing we're trying to do is pair each team with a community member that has relevant expertise and a desire to kind of maybe some you know- passion for the area that they're building on and sort of pair you so you get an extra person on your team to kind of give new ideas and help move the process along and be just like on a sounding board and things like that. So that should be pretty cool. You get to meet someone new from the community- another goal of this and, like I said, you'll be showing the outputs of your process as we go along to the community and getting feedback. But you also, of course, being at the end kind of showing your final product is a great way to expose what you're doing to the broader community. Remember, the scf awards are a choice by the community itself, not just the Stellar team, and so you really you know you want to expose people to it, make sure they know what you're doing, and that'll help a lot when it comes time for the voting on, + +[32:00] You know, potentially on your project. We've created a bunch of self directed modules and videos for each step of the sprint process. So normally in the past, basically I would go up and explain everything and then you know you, the sprint participants, would do it, and with a facilitator. Now we're trying to make it so you don't need to have a facilitator and you don't need to have me talking to you, although we will have some points where you know myself or somebody from Stellar, we'll jump in and just kind of talk to the whole group. But we're trying to make it a lot more self directed so some teams will probably be able to go a little bit faster. Some may, you know, want to take more time in a particular area, and so make it a lot better for everything, for the suspense you won't be forced to move along at the same speed as everybody else. 
We're also going to have kind of some fun gamification, quote, unquote- where, you know, as the community sees what people are doing, we're going to kind of create some ways for them to react and, you know, promote certain ideas or teams, ID, you know, products. + +[33:00] We want it to be positive and inclusive, not, you know, like there's no down voting, there's only uploading. But you know, and there'll be a lot of ways in which to get recognized, not just, like you know, does everyone like this or not, but like you know the most improved or the kind of the coolest ideas in different kind of categories and all that kind of stuff. And Stellar has been generous enough to give small amount of money for cash prizes. So some of the winners in different categories are going to take a little bit of cash home in your pocket. it's not, you know not tons, not, it's not retirement money, but you know just a small amount to kind of like get people's blood going a little bit, make sure everyone's engaged and trying to put your best foot forward. We're also gonna have global time zone coverage. So, basically, you can do this sprint from anywhere in the world and we will have facilitators available- at least one facilitator and hopefully one technical person + +[34:00] Available at all times, basically, which should be exciting- and a lot again it'll allow us to have companies from all over, you know, entrants from all over the world who participate, and to be a lot more participants. Last time, I think we had nine, and this time I don't know how many we're gonna have, but it's looking like, you know, 30, 40, 1950. So, yeah, it's gonna be exciting, it's gonna be cool. There's gonna be a lot of different ideas and products and cool new technologies. So we're definitely looking forward to it. So, after all that, you know, this slide just kind of gives you the very basic boil down, boiled down version of what to expect. You come in with an idea and a problem you want to solve and you leave with a validated prototype product and maybe some cash. So it's a really valuable experience and if you've ever done a sprint, you'll know what I mean, because almost you'll know + +[35:00] What I mean, because almost everybody who goes through them is like, oh, I'm gonna do another one right away and, if nothing else, you'll learn the methodology and you'll take it home with you and you can do it again. So, gift, I'm gonna hand it back to you. Thanks everybody. I don't know. Do we shoot you guys want any questions or other questions? Maybe that I should answer? I haven't been, So, yeah, there are actually a bunch of questions on the Q&A chats, but let's give room for emmanuel to share their experience. As I mentioned earlier, emmanuel was a past, you know, participant at one of the bootcamps we had and you know also one of the winners, same with the crop, the cash team. So they will pretty much be sharing what their experience was like, as well as maybe also give us a bit of you know what they've been up to since the bootcamp. So, yeah, + +[36:00] Over to your manual. Should I stop sharing? Are you guys gonna go on video so people can see you live? Or, sorry, gif, we lost you. Should I stop sharing? Yes, you can stop sharing. Yeah, where are you guys? Okay, I can go now. Yes, you can all. Right, awesome, thank you so much. Gifts- thanks so much. I felt like what would? Coffee came while he was giving the presentation. A lot of you know not started like just how many months. By the way, my name is aristic manuel. 
I'm the CEO and the co-founder of Link. We are building Web3 cross-border payments infrastructure for Africa. We had the opportunity to be part of the DFS Lab bootcamp at the beginning of the year, around April, and it really was a fun experience for us. I + +[37:00] told — when I spoke to Anke, I said that was the highlight of the year for us. It really got the ball rolling towards us building products, towards us going to market. For my team, one of the things we will always remember was really stepping back, really taking a breath through the whole experience, to really understand — because when we came into the bootcamp, we had already built a piece of our stack on the Stellar blockchain, so we were quite confident. We were like, okay, you know what, we can skip ahead and go straight to day three, or day four or five. But we had an awesome facilitator who was able to walk us through day by day — we got to ask ourselves the toughest questions, from the sprints to the crazy eights — and it really made us understand + +[38:00] what we are building and who we are building for. So, until today, the things we were able to take out of the bootcamp really inform our decisions, even around product deployments, even around understanding UI/UX, really trying to understand: okay, if you have a button there, would that be a UX problem? Or how long is your solution compared to, maybe, an existing solution? So you're really thinking about things. We went all the way down to marketing. So for us it was a really fun experience, a great one. We got to learn a lot. I think it was a week — yeah, really a week. My team really said, okay, this week we're going to put our minds in. After the first day we saw how it was — it lasted from, I think, 10 to 5 each day, so it can be a bit exhausting. But trust me, after the full week we were able to absorb a lot out of it — we learned so much we felt like + +[39:00] we had never known anything. And from there it really was a springboard to everything we've been able to do today. In fact, a month or two after that, we were part of the Stellar Community Fund — which is, by the way, a really awesome thing they put together — and we were winners of that too. So, yeah — my experience throughout the bootcamp: like I said, the bootcamp was a highlight for us and a major springboard to most of the things we've been able to do today. We've had awesome partnerships since the bootcamp with companies in the financial space and in the crypto space, in and out of Africa, to really help us enrich our mission. Stellar was beautiful because Stellar was built for payments, and when you have Stellar helping you solve those difficult problems in payments in Africa, and then you have the bootcamp really sharpening your idea to make you + +[40:00] understand the things you should really do — you're in the right place. So, yeah, thank you very much. Emmanuel, that was really — you know — great, a great experience rundown.
I'm sure a lot of the other to-be participants will get a really good insight into what to expect. Thank you so much for also taking the time to come share your experience. So, yeah — over to you, Marvellous. Okay, good day, everybody. I'm Marvellous Akinola, a Crop2Cash team member and a participant of the Stellar bootcamp around April. In Crop2Cash, seeing prospective opportunities at the early stage and seeing them through is one important thing we do not joke with. The thought of having to integrate into our existing platform was mind-blowing — a + +[41:00] lot of deep thought needed to be put in place in order to see the opportunity that comes with it and to see how we could mitigate the challenges: the challenges of integration into our existing platform, and also the use-case challenges, considering that our major target users are mostly uneducated. The experience generally was intriguing and challenging at the same time, given that there was a lot to achieve in a little period of time, but I would say every moment spent was worth it. It was an exercise that fostered teamwork, as it presented an avenue for us to learn as well as to network with other people. And one thing I admired about the whole bootcamp is the watchword that says: get started, not perfect — and trust me, during the process of starting, your imperfections get made perfect. And one thing I particularly + +[42:00] enjoyed was the structure of the bootcamp, especially the way we had to go from conceptualization of a solution to the implementation of said solution. And I would be lying if I said we were not faced with a bit of challenges at the very inception of the whole project, but with the help of our personal facilitators — Gift, and the moderator of the program, RK — we were able to get answers to every one of our questions, although it is expected that every participating member of the team should be in a common room, so as to share ideas and be readily available for discussion. It is also noteworthy to mention that there was convenience — the experience with the workspace in the bootcamp was very great. And my takeaway after the whole bootcamp is that everybody's idea is very significant. It doesn't matter how important you may feel your ideas are; it will cost us nothing to actually share them with others. Who knows — it + +[43:00] might be the spark that is needed to light up other people's ideas. And I cannot overstate how lenient, patient, and optimistic the project anchors at DFS Lab were towards every individual team. So I'd like to say thank you for the opportunity, and I wish every aspirant the very best. Thank you so much, Marvellous. Thank you so much for sharing your experiences with us. I see that some questions have already been answered in the Q&A, so if you dropped a question, you can go back to the Q&A section and just read the answers. I see Anke went off, you know, to keep the keyboard running by answering your questions. So, yeah, just feel free to go back there to check. I think we're at the stage where we pretty much just + +[44:00] have the floor open for closing remarks, since a bunch of the questions have already been answered in the Q&A chat. So, yeah — over to you, Jake and Anke.
Yeah — actually, thank you, Jake, for answering. I think there's a comment — as I was reading through everything, yeah, I think there's a common thread, especially for existing projects. Because SCF, right, it's not only for new projects that are completely new to Stellar; it's also for those who are already a bit further ahead and are, for example, submitting a new product on Stellar that they're thinking about. So, Jake: how would the approach be different for these different types? Can people with existing products also still get value out of the bootcamp? Yeah, totally. And actually, the bootcamp and the design sprint methodology work across any level + +[45:00] of organization. We've done it with unicorns who have massive product suites that are already processing billions of dollars per year, and they still have product challenges as well. They can define a problem that they want to solve; somewhere in their product flow, something's not working the way they want it to work; they want to ideate, test some new things, build a prototype of that new feature. The cool thing for companies that already have some version of a product built is that often you can use screenshots or, if you've already done some prototyping, you can modify those prototypes — so you get really realistic prototypes, sometimes embedded in stuff that already exists within your product, and then just test something that's new relative to what you've already built. So, yeah, it definitely doesn't matter what stage you're at; this is still useful even if you're further along. Yeah, I would agree. It's just a methodology — it's not just about kickstarting your product, because obviously you + +[46:00] cannot build a whole product in a week. It's really about singling out the most pressing problem that you choose to solve in those few weeks. And then you get facilitators that have experience, you get community and ecosystem partners that might be able to help you, and you can get developers on Stellar helping answer any technical challenges that you encounter. So, yeah. Another thing that I noticed here is that people are asking: are you building a proof of concept or an MVP after the bootcamp? So, Jake, what do you generally see bootcamp participants walk away with after participating? Well, they're usually building a prototype, with a proof of concept on the back end. So it's usually some + +[47:00] sort of design front end that creates a user experience — and that can be done in all kinds of different mediums: Figma, PowerPoint (we use PowerPoint), anything, really; it just depends on what you're comfortable with. And then usually there's a proof of concept, a sort of technical integration on the back end, so that the machinery that needs to work behind the scenes can at least do the basics. It's not performance-ready and that kind of stuff, but it's a proof of concept on the back end side. So it's a combination of those two things, and the idea is that really helps you move.
You know, in only three days you've been able to test both the validity with a customer-facing prototype — the design-experience prototype — and also test the technical validity. So that's the idea: it's a very accelerated way to make a decision as to whether that's the way you want to go — or maybe it didn't work out how you thought — and you avoid having to build a whole minimum + +[48:00] viable product before knowing that it's not going to work. Yeah, that makes a lot of sense. I'm trying to — let's see — answer everything. I think we answered most of those things. Let's see how many... Oh yeah — for the amount of projects, we're aiming to see if we can get about 30 to 40 for the bootcamp, depending on how many submissions we get; we'll just have to see. Usually, for an entire SCF round, we try to get around 50, because that's usually the maximum amount someone can review at all — it's already a lot, trust me. And that will run until the final October 30th deadline. But we're expecting to have most of our submissions before the bootcamp, because the bootcamp can really elevate your presence in the Stellar Community Fund round. Yeah, I mean, + +[49:00] I think that makes sense. If anyone has any last-minute questions, put them into the Q&A now — maybe we'll be able to answer them. But, Jake, I'm gonna give the floor to you. Well, there's not much else to cover. There are a few things here on the screen — you can see some next-step stuff. There are some links, and I think we're going to share this PowerPoint, if I'm not mistaken. But definitely go read more about the Stellar Community Fund. Obviously, if you're applying, you should read about it — but it's a really cool concept that Stellar has come up with, the whole community fund idea. So go take a look and learn more, complete your submission before September 25th to be considered for the bootcamp, and plan to come and give it your all if you're selected to participate. So, yeah — hopefully that gives you all a good sense of what to expect, + +[50:00] what the motivations are, what you would get out of it, and why to go. And, yeah, I hope to see you all there. Awesome — bye, guys. Thank you, Emmanuel and Marvellous too, and thank you to the DFS Lab team for participating. Thank you, everybody — looking forward to it. Bye, everybody. + +
diff --git a/meetings/2022-11-17.mdx b/meetings/2022-11-17.mdx new file mode 100644 index 0000000000..79617704cf --- /dev/null +++ b/meetings/2022-11-17.mdx @@ -0,0 +1,117 @@ +--- +title: "Token Authorization and Clawback Controls" +description: "Discussion of CAP-46-06 covering Soroban’s built-in token contract, authorization semantics, clawback behavior, and how classic Stellar asset controls map into smart contracts." +authors: + - david-mazieres + - jake-urban + - justin-rice + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [developer, CAP-46-6] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +With Soroban live on Futurenet, the protocol discussion focused on CAP-46-6 and the evolution of Stellar’s built-in token contract for smart contracts. The goal is to faithfully represent classic Stellar asset semantics—such as authorization, clawback, and issuer controls—within Soroban while keeping the interface simple for developers. + +Much of the conversation centered on `auth_required` assets and how issuers can preserve compliance guarantees when tokens are wrapped into Soroban contracts. Participants explored trade-offs between complexity, performance, and flexibility, especially for regulated assets that want to participate in DeFi use cases like AMMs without bypassing existing controls. + +### Key Topics + +- Overview of CAP-46-6 and recent changes to the built-in Soroban token contract. +- Migration from `bigint` to `u128` for token balances and implications for standards like decimals. +- Renaming and alignment of functions (e.g., burn vs. clawback, freeze vs. authorize) with Stellar classic semantics. +- Handling `auth_required` assets: default deauthorization, issuer reauthorization, and balance initialization. +- How authorization state should propagate from classic trustlines to Soroban contract balances. +- Clawback behavior for wrapped assets, pooled balances, and issuer/admin controls. +- Differences between classic Stellar authorization levels and simplified Soroban authorization. +- When issuers should rely on the built-in token contract versus deploying custom token contracts. +- Open questions around interoperability, wrapping, and swap mechanisms between classic assets and custom Soroban tokens. + +### Resources + +- [CAP-0046-06: Smart Contract Standardized Asset](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-06.md) +- [Stellar Protocol CAP Repository](https://github.com/stellar/stellar-protocol) + +
+ Video Transcript + +[00:00] So these protocol discussions are where we go over CAPs, which are Core Advancement Proposals. These are basically specifications for how to update Stellar Core and the Stellar protocol in order to implement new features and make major changes. Any CAP goes through a process where it's drafted, it's discussed in the open on the Stellar Dev mailing list — which you can join — and then eventually it's brought to this meeting when it's time for a synchronous discussion. All CAPs go through a review process and get vetted, and before they're approved there's a pretty lengthy engagement with the greater Stellar ecosystem. Finally, when they are approved, they get bundled up into a new major protocol release, and before that protocol release actually goes live on the network, validators have to agree to accept that change. So there is also a validator governance step that's required before the actual turning-on of a new feature on + +[01:00] Stellar. So lately we have not had that many protocol meetings, and that's because — I'm just gonna see if I can invite some people up to speak — that is because the CAPs that we've been working on relate to Soroban, which is the smart contracts platform that we're building, that will bring smart contracts to Stellar, and a lot of the work has been on the implementation side. Earlier this year we were talking through about eight CAPs, and for now we've worked on the implementation of those. We have launched them on the Futurenet, so if you're listening to this and you want to experiment with Soroban, you can, on a dev test network that exists right now. But today we do have a discussion that we want to have around a CAP that hit the mailing list — there were some changes that hit the mailing list earlier, actually I think yesterday — and that CAP is CAP 46-6, smart contract standardized asset. There's a link to the actual CAP in the show notes, or in the event description, so if you + +[02:00] want to follow this discussion, which will be technical, I definitely advise reading the CAP; we're going to get into the details today. And so — is there anyone else who needs to be brought up to the stage? I guess we can go ahead and start, and we can bring people on stage as necessary. I think to start with [CAP-46-6](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-06.md) — Sid, do you want to start out by talking a little bit about the CAP, walking through the changes, and setting up the discussion for the rest of the meeting? Yeah, I can do that. So this CAP is about a standardized token contract on Soroban, or what we now call the built-in token contract. We already have an initial implementation of this that people are using on Futurenet, and this CAP has been out for a while, but I'll go over the recent changes at a high level. The most significant one is that the token balances have been updated from bigint to u128 — and again, these + +[03:00] are just updates to the CAP, not the actual implementation yet. The approve function was replaced with increase allowance and decrease allowance methods. There are some naming changes to make Soroban terminology match up with Stellar classic: burn was changed to clawback, and unfreeze to authorize. We allow the admin to disable clawback, along with rules on how it's set for wrapped assets.
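+
+For orientation, the renames just described give the built-in token roughly the following shape. This is a sketch distilled only from the discussion, modeled as a plain Rust trait rather than real SDK code — the exact names and signatures live in the CAP text, which uses abbreviated forms in places:
+
+```rust
+/// A sketch of the discussed built-in token surface, not actual Soroban SDK
+/// code. `Addr` stands in for whatever identifier type the contract uses.
+trait BuiltInToken<Addr> {
+    /// Balances are u128 per the recent change (previously bigint).
+    fn balance(&self, id: &Addr) -> u128;
+
+    /// `approve` was replaced by a pair of relative adjustments.
+    fn increase_allowance(&mut self, from: &Addr, spender: &Addr, amount: u128);
+    fn decrease_allowance(&mut self, from: &Addr, spender: &Addr, amount: u128);
+
+    /// Renamed to match Stellar classic: what was "burn" is clawback
+    /// (the admin removes tokens from a balance)...
+    fn clawback(&mut self, admin: &Addr, from: &Addr, amount: u128);
+
+    /// ...and what was "freeze"/"unfreeze" is deauthorize/authorize.
+    fn set_authorized(&mut self, admin: &Addr, id: &Addr, authorized: bool);
+}
+```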
And then there's this — it wasn't explicitly out of the CAP, it was in the mailing list discussions — around how auth required should be handled. I had two ideas there that we can discuss, and it sounds like auth required is something that we will need to handle. Yeah. So, does anyone have any questions or comments? Yeah — I'll start with something simple: the naming conventions. I understand they align with Stellar classic; do + +[04:00] they also align with the greater cryptocurrency ecosystem and equivalent contracts on, say, Ethereum? So, burn-to-clawback — that actually matches up with ERC-20. For example, `burn` on the OpenZeppelin ERC-20 standard actually refers to a user burning their own balance, which is not what burn does in the token contract today; in the Soroban token contract, that matches clawback. For freeze/unfreeze — this was a question we had — I switched it to authorize just because we want clawback to be usable on frozen balances, and we didn't want someone to make the assumption that a frozen balance can't change, because it can, through clawback. I believe on the Ethereum side + +[05:00] it's actually called — I think — pause, and you unpause it. So it's a little different from that. Do you have concerns over those differences? Yeah, I think the blacklistable contracts that, for example, USDC adheres to actually use a completely different language, which is blacklisting, which has its own issues. Yeah, that's unique to USDC, right — that's not even a common ERC-20 implementation. That's a good question; I haven't looked at others. I'll take a look at that. Okay — yeah, I definitely think it makes sense to just choose something and go with it, but maybe survey the ecosystem and + +[06:00] make sure that there are no standards we're overlooking. It doesn't look like we have. Okay — you mentioned the issue with auth required and potential solutions. Can you talk a bit about what the issue is, to reiterate, and what some potential solutions are? Yeah. So currently on Stellar classic, if the issuer has the auth required flag set, any new trustline will be deauthorized by default. The reason for this is that the issuer may want to verify the owner of the trustline — make sure they go through KYC and pass whatever regulations they need to — and then you enable the trustline and they can do what they want. That concept does not exist at + +[07:00] the moment in the built-in token contract. So if you have an asset that is auth required on the classic side and has been wrapped into Soroban, and you send it to a new balance, that new entity can receive it — and this breaks the auth required semantics. So the one idea I had was that in the token contract there are no balances by default in the beginning; they only exist once they're actually written into a ledger entry. And so you can treat that write as the initialization of the balance. My idea was that when you create this new balance, you check the issuer, and if the auth required flag is set, then you block it and require that the issuer explicitly approve that balance, or that identifier, before it's created.
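+
+A toy model of that first idea — deauthorized-by-default balances for wrapped auth-required assets — in plain Rust rather than contract code; every name here is illustrative, none of it comes from the CAP:
+
+```rust
+use std::collections::HashMap;
+
+struct Balance {
+    amount: u128,
+    authorized: bool,
+}
+
+struct WrappedToken {
+    /// Mirrors the classic issuer's auth_required flag.
+    issuer_requires_auth: bool,
+    /// Keyed by a stand-in string identifier.
+    balances: HashMap<String, Balance>,
+}
+
+impl WrappedToken {
+    /// Creating the balance record is where the check happens, mirroring
+    /// "treat the first write into the ledger as initialization".
+    fn init_balance(&mut self, id: &str) -> &mut Balance {
+        let authorized = !self.issuer_requires_auth; // deauthorized by default
+        self.balances
+            .entry(id.to_string())
+            .or_insert(Balance { amount: 0, authorized })
+    }
+
+    /// The admin (issuer) explicitly authorizes an identifier, analogous
+    /// to approving a classic trustline.
+    fn set_authorized(&mut self, id: &str, authorized: bool) {
+        self.init_balance(id).authorized = authorized;
+    }
+
+    fn receive(&mut self, id: &str, amount: u128) -> Result<(), &'static str> {
+        let bal = self.init_balance(id);
+        if !bal.authorized {
+            return Err("balance not yet authorized by the issuer");
+        }
+        bal.amount += amount;
+        Ok(())
+    }
+}
+```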
+ +[08:00] There are some issues around this, related to how you would actually indicate that it's authorized, but this is the best idea I have so far. The other one is to just disable admin controls on the Soroban contract for wrapped assets that have auth required set, but this makes it much more difficult for auth required assets to be used on Stellar. I'm sorry — there's also maybe an axe-like approach, in which we just don't enable Soroban importing and exporting at all for auth required assets. That's true, but Jake has mentioned that there are some partners that are auth required that want to participate in Soroban, so that wouldn't be possible in that case. + +[09:00] It depends, you know, on what we want to do with those assets. Are there drawbacks to the first approach that you outlined? It just adds some complexity — for example, how do you store this? I made this change in the CAP, but it actually isn't ideal: before, we had a flag for when things are frozen, which is not the normal case, so if you never freeze the balance, the current token contract never writes this extra ledger entry — it only writes a specific flag. But I changed the CAP to write the authorized flag, so every balance will have a new entry that says authorized, which is not necessary by default for non-auth-required assets. I have an idea to fix this: you can tie the balance and this flag into a single ledger entry, but it just adds some + +[10:00] complexity. But I think we do need to either not allow auth required assets in Soroban, or allow them using a mechanism like this. So I feel like, if you want to allow auth required, this — or something similar — is probably the best thing we can do. Sid, just so we're on the same page: this means we effectively increase the complexity of these wrapped token contracts. But the standard token contract that you can still issue, that's pure Soroban — does it also inherit these complexities? I think we can do it in a way where it doesn't, right? Because if you can tell that it's a wrapped token contract, then in that case you can check the issuer and check the auth required flag, but if it's not a wrapped token, then you can ignore all of + +[11:00] this. Right — and from the perspective of a contract developer that wants to interact with it... are we changing the standard token interface, or from their perspective is it just another ERC-20-like standard token? The interface shouldn't have to change; maybe the semantics around authorization would. I think we can do it in a way where it would be transparent to Soroban tokens. That makes sense, yes. One thing I'll just say quickly is that today on the network, the assets that use auth required have a lot of account holders, and we are seeing organizations that are + +[12:00] looking to have auth required assets be used on Soroban — so used by a fairly large audience. So I don't know if going forward with an approach where auth required assets simply aren't usable on Soroban is... I mean, we could do it.
But it's going to cut off some use cases for organizations that, from a regulatory compliance perspective, need this requirement — to KYC holders before they can hold the asset. So, Jake, just to be on the same page: you're saying that there is a growing group of issuers that is auth required, or planning to issue auth required assets, and out of this group there is a non-trivial subgroup that is interested in having + +[13:00] their assets available for, like, DeFi on Soroban? Exactly, yeah. We have a partner today that's looking to participate in AMMs and other DeFi constructs on Soroban, but the asset they would like to use — they're planning on making it auth required. Okay. One aspect of this, which I think is a good thing but is good to know: if we go with the first approach I mentioned, the issuer will need to approve any address and any identifier on Soroban that wants to accept the balance. So even if it's a liquidity pool, the issuer must approve that liquidity pool's address in the token contract. Yeah, understood — I think that's actually exactly what they're looking for. + +[14:00] There are a couple of questions in the live chat too; I don't know if you want to take a look at them, Sid, or if you want me to just read them out. Let's see — yeah, well, one is... oh yeah, and then if you look below that there are... yeah, two questions. So for clawback — sorry, Tomer, do you want to go? Yep. Okay: clawback is, I think, something we wanted to support because it's supported on the classic side — if you have an asset that is clawbackable on classic and you move it over to Soroban, then it's a control you + +[15:00] want to give the issuer, and it can be disabled. I do agree that clawback versus burn is a little confusing. The CAP doesn't actually specify a burn in the ERC-20 sense, and maybe it should. Right now, I think what you could do is send to a zero ed25519 address, but maybe that's not sufficient. But I agree — I can think about the naming differences there a little more. So, for Moot's question on decimals: I agree. The CAP is — I believe it's missing decimals for the non-wrapped case, and the idea there was that the user could specify whatever they want. But we should specify a standard, and we haven't discussed that yet. This still depends on the discussion — we moved from bigint to u128, but I think there are + +[16:00] still some discussions to be had there on what we think a default decimal should be. So maybe we can start by — is there anyone that objects to u128 as the standard token contract number type? Okay: going, gone. Okay. And so, that's Moot's next question: for the auth required solutions, won't we run into weird situations where only Stellar accounts can participate — or do non-auth-required built-in tokens support Stellar accounts, contracts, and basic ed25519 keys? I can't think of a reason why this would happen off the top of my head — there's + +[17:00] no difference between this auth required mechanism that I mentioned and an identifier that is deauthorized. So I can't think of an example like this; if you have one, let me know and I can think about it. What about looking at the wrapped smart contract as an on/off ramp? I'm not sure what that means. Okay.
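+
+Returning to the decimals question for a moment: nothing is settled above, but classic Stellar amounts carry 7 decimal places, so a wrapped token would plausibly report 7 while the default for pure Soroban tokens stays open. A tiny illustration of what u128 amounts at a given decimal scale mean — an illustrative helper, not CAP or SDK code:
+
+```rust
+/// Classic Stellar amounts use 7 decimal places ("stroops"), so a wrapped
+/// token exposing decimals() == 7 would scale whole units like this.
+const DECIMALS: u32 = 7;
+
+fn to_base_units(whole_units: u128) -> u128 {
+    whole_units * 10u128.pow(DECIMALS)
+}
+
+fn main() {
+    // 3 whole units == 30_000_000 base units at 7 decimals.
+    assert_eq!(to_base_units(3), 30_000_000);
+    println!("3 units = {} base units", to_base_units(3));
+}
+```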
Yeah — Lee? Yeah, I'm just thinking about the term clawback, and whether that is a problematic term. I mean, I feel like the term is understood in other + +[18:00] financial ecosystems, but maybe it's not a good term to be using going forward in a blockchain world. Because — do you think, if we said that the admin could do a transfer from any account to any account, it's sort of the same thing as clawback? And it's maybe bad to do that, because we're overloading an operation that can suddenly do something that might be surprising — but it's one way to try to eliminate that term without needing to replace it. Yeah, I mean, that sounds a little more complex, right? It's not even the same thing, because in the clawback the balance gets burned — which is how it works on classic and currently in the token contract — while in your example it wouldn't get burned, it would get sent to another address. I guess you could send it to the zero ed25519 + +[19:00] key. I think this is something we should discuss, because to me, clawback already exists on Stellar, and it works almost the same on Soroban, so it made sense to me to change the name. But if enough people... I think it's something we should discuss. Yeah — I personally think clawback's probably fine, because we already have that definition, and it sounds like the folks who are planning to use this today already understand what it means. Sorry, I have a bit of a question on a tangent to what you just mentioned. So, on Stellar, if you transfer to the issuer, you're essentially burning, because the balance disappears — the issuer can't hold a balance of their own asset. What happens if you transfer to the admin, or + +[20:00] transfer to the issuer, on the Soroban side? It doesn't get burnt; it just updates the issuer's balance. The issuer — or the admin account — doesn't have any exception when it comes to transfers or anything like that. Okay, other things: authorized trustline inheritance from classic. On classic, a Stellar account's trustline carries the authorized flag — that is how it works on classic; you need the trustline to get authorized by the issuer. On Soroban we would imitate the same: the admin on Soroban would need to explicitly authorize. On Soroban we have this method called authorize, and it authorizes that balance. + +[21:00] Does that answer your question? Oops — okay. And the naming there — maybe we need to think about that some more, about the fact that you can authorize on classic and also authorize on Soroban, but Soroban authorization doesn't work the same: it's much simpler by design. Could you elaborate on the differences, just so we're all clear on that? Yeah. So in classic there are a lot of different levels of authorization — that's what I mean. You can be fully authorized; you can be authorized to maintain liabilities — maintain offers and liquidity pool deposits, but you can't do anything else with your balance; and I believe there's also a flag that + +[22:00] says you can actually claw back from the trustline. That flag is maintained currently in the proposal in Soroban — the clawback flag — but it's at the contract level and not at a balance level, because my thought process was that I don't think it needs to be at the balance level, and it would add a lot more complexity.
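+
+To make that difference concrete, here is a toy model — plain Rust, not SDK code — of the graded classic authorization states collapsing into the single boolean discussed for the Soroban token contract. How the CAP actually maps the middle state is its call, not this sketch's:
+
+```rust
+#[derive(Debug)]
+enum ClassicTrustlineAuth {
+    Deauthorized,
+    /// Can keep offers and liquidity pool deposits, but not transact.
+    AuthorizedToMaintainLiabilities,
+    FullyAuthorized,
+}
+
+/// One plausible collapse of the three classic states into the simpler
+/// yes/no authorization flag discussed for the Soroban token contract.
+fn to_soroban_authorized(classic: &ClassicTrustlineAuth) -> bool {
+    matches!(classic, ClassicTrustlineAuth::FullyAuthorized)
+}
+
+fn main() {
+    assert!(!to_soroban_authorized(&ClassicTrustlineAuth::Deauthorized));
+    assert!(!to_soroban_authorized(&ClassicTrustlineAuth::AuthorizedToMaintainLiabilities));
+    assert!(to_soroban_authorized(&ClassicTrustlineAuth::FullyAuthorized));
+}
+```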
But yeah, those are the differences. All right — any other questions, any concerns? Do you have the answers you need to move forward? I believe I do. So for the auth required solution, I think I need to rework what I have a little bit to make + +[23:00] sure that the non-wrapped scenario doesn't change or get any worse. Other than that — oh, that, and I should look at naming: looking at burn versus clawback in the ecosystem. Other than those two things, I don't think there's anything else I need to look at. Well, I think there is maybe a symmetry question there that I was just thinking about. When I think of the built-in token contract versus the more generic interface that we are exposing, that people can use when they implement the contract — if we have this auth required thing, it basically limits the way you can do payments from the outside, + +[24:00] so is that pointing to, I don't know — it's basically an implicit hook, you know? I know the original CAP had this section on "hooks are evil, we should never do that," but at the same time this looks like actually a hook to me, one that happens to be a classic hook. And I'm just wondering: when we say that people that are today looking at implementing auth required assets want to move some of the functionality into Soroban, are they really going to be satisfied with those limited semantics? Because a lot of the things people were asking for in the past around programmatically controlling the flow of assets + +[25:00] were, hey, I want to put limits on transactions — like, you can only move ten thousand dollars a day, or whatever — things like that. So, yeah, wondering where that fits in this picture. You know, the first requirement, at least the one I was following, is that you'd like to be able to support anything that can happen on classic in Soroban, in the built-in token contract — which is the goal of adding support for auth required and clawback. What you're saying is an interesting idea: adding hooks so admins can customize what happens. But one concern there is execution — one of the nice things with the built-in token contract is that it's built in, right, it doesn't execute Wasm. If you have these hooks, they would be in Wasm, so you would take the performance hit,
+ +[26:00] and in that case, I don't know, you could argue that you should be using a different token contract altogether — one that just adheres to the interface. Do you agree with that? You mean — yeah, I guess, as soon as you have hooks you can't do a classic payment, for example, because you're bypassing the hook; is that what you're talking about? I'm not sure what you mean by that. I'm saying that if you add a hook, the hook would be in Wasm, right — you've called into another contract. Yeah. So for the default behavior, you would just call the built-in token contract, which would have a default implementation to implement those auth required type semantics. But I could see the need for maybe finer control in general — the finer control that we would provide later on, or anything issuers would want + +[27:00] to have. Yeah, I guess maybe those end up just being separate — they're not using the built-in token contract at that point. So that's what I'm wondering about: those use cases with auth required assets in Soroban — people are not going to be able to actually implement the type of things I'm after. Like I said, the feature requests around hooks were things like maximum amounts, or conditions on the transaction — it was not just a flag, it was basically open-ended conditions — and then use the built-in token contract, which may be okay. But then I'm kind of questioning... are you talking about from the perspective of net-new issuers, or from the perspective of existing issuers that + +[28:00] want to introduce new constraints? Yeah — I'm just wondering whether the people that have those auth required assets, if they think that they can basically add more conditions over time, whether this is actually not going to work. You got it. So to some extent I hear a question for Jake, if he's still here, which is: these new issuers that are coming on board and are interested in auth required assets — maybe the solution for them is to actually issue a custom contract on Soroban, rather than an auth required asset on classic that they keep trying to constrain to be what they want it to be. Yeah — we've talked about something similar with regulated assets that need much more fine-grained control, + +[29:00] and in those cases I think it makes sense to use a new token contract. All right — the built-in token contract can't do everything, and I don't think adding these hooks to the built-in token contract is a good idea. Just my opinion. Yeah, I think the position should be that if you want these types of conditions and restraints, you should definitely deploy your own contract that adheres to the standard token interface — similar to what we're seeing in Ethereum and other ecosystems. Okay. So — you know, Sid, I think we're at a relatively good place with + +[30:00] this CAP. I do think there's this parallel discussion that probably hasn't matured enough to bring up right now — the discussion we abandoned a few months ago to make progress, which we need to go back to, in terms of the single-balance versus two-balance approach. I think we need to have a better understanding of what the possibilities are there before we can actually make the go-ahead decision with regards to this specific CAP. Yeah, I agree. Anything else anyone wants to discuss, or are we done for the day?
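+
+To make the "deploy your own contract that adheres to the standard interface" position concrete, here is a toy model of a custom token enforcing the kind of per-day transfer cap mentioned above. Plain Rust, illustrative names only — none of this is SDK or CAP code:
+
+```rust
+use std::collections::HashMap;
+
+struct LimitedToken {
+    balances: HashMap<String, u128>,
+    /// Amount each address has already moved in the current day; a real
+    /// contract would reset this per ledger-time window.
+    spent_today: HashMap<String, u128>,
+    daily_limit: u128,
+}
+
+impl LimitedToken {
+    /// Same transfer shape as the standard interface, plus a policy check.
+    fn transfer(&mut self, from: &str, to: &str, amount: u128) -> Result<(), &'static str> {
+        let spent = self.spent_today.get(from).copied().unwrap_or(0);
+        if spent + amount > self.daily_limit {
+            return Err("daily transfer limit exceeded");
+        }
+        let from_bal = self.balances.get(from).copied().unwrap_or(0);
+        if from_bal < amount {
+            return Err("insufficient balance");
+        }
+        self.balances.insert(from.to_string(), from_bal - amount);
+        *self.balances.entry(to.to_string()).or_insert(0) += amount;
+        self.spent_today.insert(from.to_string(), spent + amount);
+        Ok(())
+    }
+}
+```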
+ +[31:00] Okay, cool — I think we might be done. I'm just checking the live chat to see if there are any unanswered questions. There's one more, from Moot, which is: in the event issuers are pushed to write a custom token contract, would there be any core support to migrate between a classic and a custom token? I would say no. That's an interesting question, but my intuition would be that you're trying to do something that isn't even supported in classic, so it makes sense for it to be a separate token — but that's something we need to think about more. Yeah — I haven't thought much about how these custom token contracts would work, + +[32:00] and so this is in response to Moot's question on whether there would be core support between classic and custom tokens. I think this is an interesting question, because with the built-in contract that we have now, there is essentially a swap capability: you have the import/export functions, which allow you to essentially swap a token on classic with one on Soroban. And if the story we're developing is that, if you want to build a custom token that provides all these custom auth required type capabilities, then I think we do need some sort of story for how you replicate that import/export — how do you swap something that's on classic with something that's over on Soroban? Because I can imagine that issuers, even if they want that more custom + +[33:00] capability on Soroban, probably still want access to the things on classic — they probably don't want to be siloed into only the Soroban ecosystem. Yeah, that's a good point. And without some sort of swap capability — and because we're not going to allow classic operations and Soroban operations to run in the same transaction — I think we're talking about some sort of off-chain swap capability, which is not great. Yeah, this is something to think about. I'm not sure how this would work — you could offer import/export as host functions, but that doesn't sound like a good idea to me; or do it off-chain, as you mentioned. Yeah, it's something to think about. + +[34:00] Any other thoughts? Okay — it looks like there's one new one in the live chat, from Demetrio Stellar: why not just have the same sort of one-to-one swap between the wrapped classic asset token and the new token? Not the best UX, but it's doable right now. I think — I've got to think about this — you might have issues with how you would do that... that would have to exist as a... wait, no, I'm not sure if that + +[35:00] would work. Like, you would have to call import/export, and if I write a custom contract right now, I wouldn't be able to call that, right? Well, I think the issuer there would be the contract, right? Kind of, yeah. Right — so you don't actually need anything special; it just happens that... yeah, that's true, you would have complete control over it. Well — yeah, complete control, because you can get it authorized. But yeah, that's true. Okay, yeah — let me think about this. Okay, it feels like we're sort of winding down here, so I think we're done for the day. + +[36:00] Thanks, Sid, for putting this out there, for answering the questions, and for thinking about it. And to everyone else: I realize that today's protocol discussion was sort of ad hoc and popped up out of nowhere. Next year, after Thanksgiving, as we start to have these meetings again, I'll try to be better about setting them up in advance and making people more aware of what we're doing and what we're talking about, so that everyone can get a bit of context before the discussion starts. But for today, again: if you want to keep up with these discussions, join the Stellar Dev mailing list.
If you want to read this particular CAP, it's in the CAP repository, which you can find on GitHub — this is [CAP-46-06](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-06.md), the smart contract standardized asset. And with that, I will say goodbye to all of you. Thanks. + +
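+
+As a footnote to the one-to-one swap idea raised in the chat near the end, a toy sketch of the shape such a bridge could take — purely illustrative, since, as discussed, nobody has worked this mechanism out yet:
+
+```rust
+/// Toy model: a contract holds wrapped classic tokens and issues its own
+/// custom token against them one-to-one, and back again.
+struct OneToOneBridge {
+    wrapped_locked: u128, // wrapped classic tokens held by the bridge
+    custom_supply: u128,  // custom tokens minted against them
+}
+
+impl OneToOneBridge {
+    /// Lock wrapped classic tokens, mint the same amount of custom tokens.
+    fn to_custom(&mut self, amount: u128) {
+        self.wrapped_locked += amount;
+        self.custom_supply += amount;
+    }
+
+    /// Burn custom tokens, release the same amount of wrapped tokens.
+    fn to_wrapped(&mut self, amount: u128) -> Result<(), &'static str> {
+        if amount > self.custom_supply {
+            return Err("amount exceeds custom token supply");
+        }
+        self.custom_supply -= amount;
+        self.wrapped_locked -= amount;
+        Ok(())
+    }
+}
+```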
diff --git a/meetings/2022-12-07.mdx b/meetings/2022-12-07.mdx new file mode 100644 index 0000000000..0952c4cae9 --- /dev/null +++ b/meetings/2022-12-07.mdx @@ -0,0 +1,153 @@ +--- +title: "Instant Soroban Developer Environment Setup" +description: "Walkthrough of an instant Soroban development environment using Gitpod, showing how developers can build, test, and deploy smart contracts without local setup." +authors: + - anuhya-challagundla + - kalepail + - leigh-mcculloch +tags: [soroban] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session focused on lowering the barrier to entry for smart contract development on Stellar by introducing an instant, browser-based development environment powered by Gitpod. The speakers demonstrated how developers—especially those new to Rust—can start building Soroban contracts without installing toolchains or configuring local environments. + +The conversation blended a live demo with practical guidance, showing how the preconfigured Gitpod workspace includes Soroban CLI, sandbox and Futurenet terminals, and example contracts. By compiling, deploying, and invoking a simple Hello World contract, the team illustrated the full developer flow from code to on-chain execution, while highlighting how this setup will support upcoming Stellar Quest challenges. + +### Key Topics + +- Overview of the Gitpod-based Soroban dev environment and why it removes local setup friction +- Preconfigured terminals: sandbox vs. Futurenet, environment variables, and Docker startup commands +- Using the Soroban CLI to build Rust contracts into Wasm artifacts +- Deploying and invoking contracts locally and on Futurenet +- Inspecting contract invocations and host function operations via Horizon and XDR +- Writing and running Rust-based unit tests for Soroban contracts +- Understanding concepts like SCVal, contract arguments, and ledger footprints +- How preflight/simulation helps determine footprints automatically +- How this environment ties into future Stellar Quest and learning content + +### Resources + +- [Soroban Documentation](/docs/build/smart-contracts) +- [Soroban Examples Repository](https://github.com/stellar/soroban-examples) +- [Soroban CLI Reference](https://soroban.stellar.org/docs/reference/cli) +- [Stellar Laboratory](https://laboratory.stellar.org) + +
+ Video Transcript + +[00:00] Hello, hello, everyone — welcome to Soroban Talks. I'm Anuhya, I'm from the Stellar Development Foundation, and I'm super excited to bring our amazing Soroban developer Lee and our DevRel lead Tyler onto the stream. Hi, everyone. Hi, friends. All right — so this is Lee's first time on stream with me, and it's also only my second stream, and we're considering making this into a series of some sort. So if y'all want that, let me know in the comments, because I would love to — if y'all want to see me and Tyler on stream more, we can all alternate. But yeah — hey, everyone. So we're gonna start off by playing a game of Chrome Dino. I don't know, Tyler — have you ever played Chrome Dino? All the times my internet goes down out here in the country... my score is still not very good, but I do like a little Chrome Dino. Exactly. So + +[01:00] we're gonna start off by competing — Lee, Tyler, and myself — and see who gets the high score on Chrome Dino, and then we will jump right in. We're just gonna start playing Chrome Dino and figure out who's the best Chrome Dino champion at Stellar. All right — so, Tyler, I'm gonna let you start first, and we're gonna see how you do. Oh my gosh — we're not gonna do these concurrently? All right, here we go. Yeah, we're all watching. Okay, in the meantime — this is gonna soak up all five minutes here — I'm gonna start getting my practice in. I don't think it's gonna take all five minutes; Tyler, you can't be that good. I don't know — my skills... Oh, but hey, Kanye is in here, Raph is in here — there you go. And + +[02:00] hello — "feeling stressed watching this." Right — that's... I died, there we go, right when Raph said "feeling stressed watching this." Tyler, you know... All right, Lee, are you ready for this? I'm ready. So, 355. Okay, all right — I'm frustrated. There are two more zeros after that — people actually get scores that high? The double jump's busted. Okay, Tyler takes the lead. Raph, are you gonna make us an NFT for whoever gets the high score on this? All right, here we go. Wait, why is it — oh, okay, here come the cactuses. It's not even fair — you have to run like + +[03:00] 100 feet before you even hit one. All right, we're getting there. That additional little twinkle sound whenever you jump over — or I guess whenever you hit a hundred or something — is very stressful. Okay — better than Lee; I'm proud of that. All right, sweet. So we could play another round, or we could keep going — I think we should just kick it off. How do you feel about that? Yeah, let's get rolling. All right. So, a couple of different announcements while we get settled in. Basically, today's stream will be about two things. We have an instant dev environment set up for Soroban. So if you're someone like me, who basically never touched Rust till Soroban started, and is just now getting started, and has come from a web2, React, Python background, and you're like, how do + +[04:00] I freaking get started on Rust — well, we have an answer for you: the Gitpod answer. So we're gonna share that today with all of you and do a couple of different demos with it. And this is your chance to ask the Soroban dev, Lee — I keep saying that; Lee, I hope you're okay with me calling you the Soroban dev — a Q&A with him. I posted a link in our Discord, and I will share that here as well.
But it's basically a link for you to put in your questions as you get them. You're also more than welcome to share them in the stream chat — we'll see the chat on stream as well — so whatever works for you, use it as you like. And if you actually want to come on stream, let me know in the chat on the Discord — that's the #live-chat channel — and I will DM you the link to join us on + +[05:00] stream, actually, and bring you on, but that will be a bit later in the stream. So I'll let Tyler kick it off. Yeah — so, as we mentioned last week, I kind of dove into Soroban myself for the first time a couple of weeks ago. I was very intimidated by it — I was also very busy, and the idea of trying to learn Rust was kind of like, no thanks, I think I'll save that for later. But necessity kind of pushed it, you know — okay, everybody's learning Soroban now. And this is kind of the thing I've wanted ever since I found Stellar way back, whenever I wanted to do something Stellar didn't have an operation for: the ability to have a smart contract running behind a single Stellar operation. So we'll get into the nitty-gritty of exactly what's going on, but I found it far easier to get up and running than I + +[06:00] thought it would be. Most of these Rust smart contracts are actually really small and compact — a lot of your programming interfaces will actually be on the front end, which is typical of smart contract development. So it's not that hard, and now we've introduced the Gitpod thing. I was coming along two weeks ago, and then I found Gitpod, and oh my goodness — you just click a link and you have a full Rust Soroban environment right inside your browser. It's the VS Code editor, which is the one I use every day anyway, which is amazing. And this is kind of a call-out: we're going to be doing Stellar Quest — if anybody is familiar with Stellar Quest, our next Stellar Quest is going to be a Soroban quest, and it's going to be all done right inside of a Gitpod. So we're not going to have to answer all of these "hey, I'm using Windows and can't figure out how to install Rust" questions — we're just going to focus on the Soroban pieces, not on the environment, Rust, or Futurenet + +[07:00] configurations. It's kind of compiling all this into the quick wins of getting our feet wet building out smart contracts. So I am going to bring my screen up — and by me, I mean Anuhya will bring my screen up — to introduce something that I think Lee added maybe last night inside of our docs. If you go to any of these examples, they now have an "Open in Gitpod" link, which will open up all of these different examples right inside the Gitpod VM, where you can run these examples right inside of our soroban-examples repo — it opens up that repo inside of a Gitpod, which is amazing. We're not going to do that right off the bat, though; I'm going to open it up with a Soroban Pioneer Quest we've got. We're actually still building this out, but I thought I'd go ahead and share it. The idea is that this is going to be a + +[08:00] pretty big deviation from what we've normally done with Stellar Quest, where you're playing it with Laboratory and doing it in `quest.stellar.org`. Now you're going to be doing the class right inside of a Gitpod, which will be a little bit disorienting — especially for people like nisho, who get very angsty and start clicking buttons
when they don't quite know what they do yet. So this is going to allow you to familiarize yourself with the environment, and that's what we're going to walk through today — this Gitpod environment. I'm going to walk you through it, and I'm going to try very hard not to leak any of our quests in this repo. So the actual repo's the secret repo — good luck finding it — but this particular one you can go ahead and find and open up. Elliot's writing some really good readme material, and I think he's even going to do a video — you'll get to see Elliot on screen, that'll be fun — walking through the Gitpod. So you'll have no excuses: you'll know exactly how to compete in the Stellar Quest + +[09:00] whenever we get these live, hopefully in November. Okay. So if you click this "Open in Gitpod," that's gonna launch your new Gitpod instance. It takes — not minutes, it takes a few seconds to spin one of these up, depending on whether there's another pre-build running or what have you. So I've already got one stood up here in my browser. This is probably quite tiny, so let me increase the size of that slightly. What you're seeing here: you've got a couple of different areas of navigation. If you're familiar with code editors and IDEs at all — particularly VS Code — this is going to at least not feel completely foreign to you. But let me just walk through a couple of these items, and then we'll begin interacting with them. Make sure to keep your questions in mind — write them down in notes or something, or in that Typeform that Anuhya sent out, so we can answer them later on, particularly the ones that I won't be able to answer, because I am still a + +[10:00] newbie, like everybody except for Lee. All right. So probably one of the most critical pieces, other than the actual code that we'll be looking through, is your bash terminal tab over here. I've actually got a Futurenet node that spins up with this Gitpod, and if you were to go to the ports over here — this little ports tab you can open up — this is actually serving right from inside this Gitpod: you've got your Futurenet, and we can see the latest ingested ledger, 1031. So, ledger 1031 — we're syncing successfully with the Futurenet. That's amazing, and it all gets configured for you. If you ever want to explore — like, okay, that's fancy, but there's a lot of magic inside of this Gitpod — the commands that actually spin up these different terminals are there, so you can see exactly what commands are being run. So this is how we actually run up that Docker quickstart Soroban + +[11:00] dev Docker image, so that's pretty cool. The other two things we've got going on are this futurenet bash and a sandbox bash — these other two terminal windows; we'll interact with both. This first one, the sandbox, is kind of a safe local playground; it doesn't interact at all with that Futurenet that we have. And then this futurenet one is connected, with environment variables attached right into the bash window — all the environment variables we'll need to actually interact with this Futurenet that we have running. So you can see these environment variables for the futurenet CLI; those get passed into the environment in that bash. So it's there to try and help you.
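+
+As a sketch of that separation, here's a check of which network settings a given terminal has in its environment. The variable names (`SOROBAN_RPC_URL`, `SOROBAN_NETWORK_PASSPHRASE`, `SOROBAN_SECRET_KEY`) are ones the soroban-cli of that era could read from the environment — treat the exact list as an assumption, since the recording doesn't name them:
+
+```rust
+use std::env;
+
+/// Print which Soroban CLI settings this terminal would pick up. In the
+/// Gitpod setup described here, the futurenet tab has these set and the
+/// sandbox tab does not (assumed variable names).
+fn main() {
+    for var in ["SOROBAN_RPC_URL", "SOROBAN_NETWORK_PASSPHRASE", "SOROBAN_SECRET_KEY"] {
+        match env::var(var) {
+            Ok(_) => println!("{var} is set (futurenet-style terminal)"),
+            Err(_) => println!("{var} is unset (sandbox-style terminal)"),
+        }
+    }
+}
+```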
If you accidentally want to run Futurenet inside the sandbox, you'll just have to pass these environment variables yourself, and if you want to run the sandbox in the futurenet terminal, you'd have to drop those environment variables. So if you + +[12:00] end up running into errors or issues, pass them in manually, or just look over here and toggle back and forth. Every now and then you will definitely get boogered, and maybe just restart your Gitpod — or figure out the magic from inside of this gitpod yaml file, the commands that were actually run to spin those up. Can you zoom in a little bit on the screen? Not everyone is as young as you, Anuhya. Darn — yes, good point. There we go, much better — now even Kanye can see it. Yep, all right. So those are our terminal windows — we've only got three running in here. Let's go through — we've talked about the environments, the Futurenet here. This is a quickstart image that basically spins up your own watcher node, but it's connected directly to the same Futurenet that's running, the one you can access in Laboratory right + +[13:00] now, for example. So things that we do inside the Futurenet here will show up if we were to, say, explore operations for a particular account — so that's all quite nice. The main obvious thing that we're all here to do, though, is to actually build and test and run contracts, and this is where we start to touch actual Rust code. I've only got one quest up in here right now, really to emphasize just the environment and learning that bit first — I just pulled the hello world example from the soroban-examples repository, so that's the one that's building up here. So to build — you could run cargo directly, but I'll use `make build`; for people that are maybe unfamiliar with the make file, it's basically — if you're coming from the JavaScript world — just like the scripts inside the package.json. So it's basically just going to run + +[14:00] this command. I'm going to allow pastes, because I'm not my own hacker. Sweet — that was nice and quick, less than half a second, and that'll put out that built wasm file. So you write Rust, and then it compiles down to a wasm executable — that's the thing you'll actually deploy or run as the contract. So a lot of the time, when you've got the Soroban CLI and you're calling something, you'll be referencing this particular wasm file. Speaking of Soroban — we've got that installed, the `soroban` command. If you're like, okay, well, that's also magic, how'd you install that — Lee has built us a nice little Docker image that pulls in the default Gitpod + +[15:00] Docker image, loads up all the stuff we need for that, and then installs things like soroban and cargo-hack and some other things we might need. I'm actually working on a Stellar Quest CLI, so you can run all of your Stellar Quests right from the command line — I'll also install that as an executable inside of this Dockerfile as well. So between the Gitpod config and the Gitpod Dockerfile, most of the magic is contained within there, which is helpful to know for the pro users out there. So anyway — we've got our soroban command line, with loads of nice subcommands to run here, like invoke, inspect, all those wonderful things. Let's go ahead — really quick — so we went ahead and we built the contract.
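+
+For reference, the contract being built is the hello world example from the soroban-examples repository, which at the time looked approximately like this — the SDK syntax has changed since (for example, the `symbol!` macro and the attribute macros have evolved), so treat this as a period sketch rather than current code:
+
+```rust
+#![no_std]
+use soroban_sdk::{contractimpl, symbol, vec, Env, Symbol, Vec};
+
+pub struct HelloContract;
+
+#[contractimpl]
+impl HelloContract {
+    /// Returns a vector of two symbols: "Hello" and whatever `to` was.
+    pub fn hello(env: Env, to: Symbol) -> Vec<Symbol> {
+        vec![&env, symbol!("Hello"), to]
+    }
+}
+```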
So it exists, and you can see right here: 13 lines of Rust code to say things like "hello, something", which,
+
+[16:00] okay, fair enough, that's not that crazy. But usually there isn't that much Rust code needed to actually pull off some pretty complex logic. If you look at some of the examples (I think we've even got an AMM example in there; we've got lots of examples), I don't think any of them ever really start to cross beyond like 200, 300 lines of code. Most Rust contracts are just very compact and small once you understand two things. First, Rust is actually really helpful: when you run things, they'll break, but they often tell you exactly what you did wrong. Second, a lot of the Rust stuff is contained within the standard library, and so a lot of the things you might need to learn or do would normally be inside the standard library; but we've got this flag here that says no standard library, and many times that will bite you. If you're experienced in Rust, you're like: where are all my Strings and HashMaps and stuff? But where there is no standard library, there's the Soroban SDK.
+
+[17:00] It's nice, because if you're coming from JavaScript and you're just trying to learn this thing, there's actually less Rust stuff you can do, which keeps the scope of things to learn quite minimal and keeps all of the documentation right inside of `soroban.stellar.org/docs`; all the things you need to know are kind of within that domain, which is super nice. All right, sweet. So I'm going to see if I can remember how to actually run this contract. We've got it built, so we should be able to actually call it. I'm going to make sure I'm in my sandbox CLI, and if I remember my homework, we should be able to do `soroban invoke`; and whenever you get lost, you can add that `-h` flag and it will tell you the different options that you have available. So we are going to need an ID, this wasm location, the specific function we need to call, and then any arguments it might take. If we were running it on the RPC, we
+
+[18:00] would either need to pass these flags or they would be pre-filled: you'll notice, if I run this exact same `soroban invoke` help in the futurenet tab, these RPC settings and secrets are already filled in for us, so it'll use those by default if we're using that particular bash tab, which is nice. Okay, so we've got that. We need an ID, which we'll just pass as 1; it doesn't really matter, because we're going to be calling it locally, right off of the target directory. I'm at the root of my directory, so I should just be able to go `target/...`. So this is going to be our actual wasm file that we're targeting (target, wasm32, release), and I think the name of this was the soroban hello world contract wasm. For the argument:
+
+[19:00] I guess before we pass our argument, we need a function. What function are we going to call? The functions we have available to us are a big fat stack of one, and it's `hello`, and as per tradition we will call it with the argument "world". When we run that, we get the response just like it told us it would: it prints out the symbol "Hello" and then the `to` that we passed it, which is also a symbol, the argument that we included. So the function starts with the default environment argument, and the second one is that argument that we passed; you could include other arguments if you wanted, but we just had this one, world.
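+
+Put together, the sandbox call looked roughly like this; flag names per the late-2022 soroban-cli, with the wasm filename assumed as above:
+
+```bash
+# Local sandbox invoke: no network involved, state lives in a local ledger file
+soroban invoke \
+  --id 1 \
+  --wasm target/wasm32-unknown-unknown/release/soroban_hello_world_contract.wasm \
+  --fn hello \
+  --arg world
+# Prints something like: ["Hello","world"]
+```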
And then it's going to print back this vector: Hello, World. Fantastic. "That's amazing," you might say, "but that's not actually a network." Fair enough, let's see if we can copy the same thing over to our futurenet. Brilliant. Okay, so before we can
+
+[20:00] invoke, though, the contract actually needs to exist on chain, so let's go ahead and deploy. I think that's a command... it is indeed. We need to deploy it with the wasm file, and I think that's basically all we have to do, because our secret key, RPC node, and network passphrase are already filled in for us. So: `soroban deploy`, with the wasm from the target wasm32 release directory, and that's going to deploy it straight onto our futurenet. Amazing. So this is our contract hash. So if we wanted to go and invoke it, I'm going to do the crazy thing of just copying this and moving it over to our futurenet tab, and the only thing we need to change here... assuming Lee built us a good futurenet. He honestly didn't build futurenet alone;
+
+[21:00] there are lots of people involved. You also want to remove the `--wasm`. I need to remove the `--wasm`, this is what this is, yep, and pass an actual ID; we don't need that. We send that straight over to futurenet, and we get Hello World back. Now, I know many of you are thinking: well, that's fancy, show it to me in Laboratory. Well, I can't actually run this in Laboratory yet, but what I can do is show you the operation that we just committed, and I was going to do that by committing the cardinal sin of looking up that secret key that I used. So let's actually do something a little bit different: let's generate a brand new keypair, on futurenet, though all of them are the same anyway. So let's just paste that; I'm going to write this down in the notes that I have over here so I don't forget it.
+
+[22:00] Right, sweet. So now we're going to go and deploy this again, this time using the new secret key that we just generated, so that I don't have to commit the cardinal sin of deriving a public key from a secret key shown on stream; which, I did just generate a key on screen, but that's fine, it's a test network, whatever. Okay, so we submitted that. If we went right now and looked up our explorer endpoint, operations for account, and we order it descending, we're going to notice that brand new operation: invoke host function. That's really the only operation that classic Stellar has added, and it's very cryptic and hard to understand, which is why we have Lee here. Let's go ahead and call another one. So we did the deploy; now we can do the invoke. We got a new contract ID, so let's put that
+
+[23:00] in, and we pass our new secret key right here. There's nothing fancy with the secret key; it's just a Stellar thing: you're just signing the transaction that you're going to submit to futurenet. But rather than having to submit an XDR or something, you can just pass in the secret key as an argument and it will do all of the signing and submitting for you. So there we go, we got our Hello World back, and if we run this again, we should see two invoke host function operations: the one that actually submitted the contract ("create contract with source account" is the host function) and this one, "invoke contract".
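+
+And the futurenet round trip, sketched with placeholders; the RPC URL, network passphrase, and secret key come from the environment variables already set in that tab, so no extra flags are shown here:
+
+```bash
+# Deploy the wasm to futurenet; prints the new contract's ID
+soroban deploy \
+  --wasm target/wasm32-unknown-unknown/release/soroban_hello_world_contract.wasm
+
+# Invoke on-network: swap --wasm for the --id returned by deploy
+soroban invoke \
+  --id <contract-id-from-deploy> \
+  --fn hello \
+  --arg world
+```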
But again: wickedly obfuscated behind the allure of "what in the world does this XDR actually have
+
+[24:00] behind it". So that's kind of where my demo ends. To me, this is, again, kind of mind-blowing: you can build these things relatively quickly and easily, whether it's on futurenet, where you can actually explore it inside of Horizon, or in your sandbox. We could just go in here and write a public function that reads something, or maybe saves some data, all those other examples that we were showing. It's actually pretty simple to write a contract, and then deploy it, run it, and see it in Laboratory, just like you would with anything else you might do with Stellar. The only difference is: what we had before was, "here's an operation that is make-a-payment". Okay, but I want to do something fancy. Too bad; you can make a payment with this operation. Now we have an operation that is "invoke smart contract", which does whatever the heck you want inside of whatever the functions are behind the contract that was uploaded. So it's like this do-whatever-you-want operation in Stellar, and my goodness, I've wanted that for a long time. And to actually see it working, and it's kind of
+
+[25:00] simple, and you've got this nice Gitpod environment to do all this stuff in... I don't know about you, but I think I'm going to quit working for SDF and start writing smart contracts, because I haven't been this excited about actually building on Stellar since I found Stellar way back in 2014. This is amazing. It's like discovering Stellar for the first time: in 2014 it was, okay, sweet, I think at the time there were like 12 things you could do with Stellar, and then later there were like 22 or something. Now there's an infinite number of things you can do with Stellar, and that is incredible. I was super excited before, just finding Stellar; now, I mean, this is something else. I can't wait to start writing weird NFT smart contracts, all that stuff that I was trying to do with Stellar Turrets that was very difficult, with lots of hoops to jump through. Now I can't wait to just start writing weird contracts. My mind, it's been hard to shut down, for all the crazy things I want to do.
+
+[26:00] I might have to create a pseudonym for Sorobanathon so I can try and swoop in on some of those prizes. Okay, enough with me talking. Before you take it away, you should show how easy it is to write tests. Yeah... I have no idea how to write a test; I've never written a Rust test in my life. So maybe: what's in the test file? Is that this one right here? Yes, and this is a real simple test that's just calling that hello function exactly the same way that you invoked it on the command line, and there's a little tiny "Run Test" text at the top of the function. If you click that "Run Test", it's going to run it here in your browser. So, rather than running the entire test suite, is this individually taking that component and running a test on it? Yeah, exactly. Is that what this `mod test` thing is? How does it link
+
+[27:00] between the lib and the test file? The `mod test` is just (oh no, what's going on here) telling the compiler that the other test Rust file should be included, because Rust files are not included by default; they have to be referenced from that lib.
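+
+In file terms, the layout being described is roughly this (filenames assumed from the hello world example; the editor's "Run Test" button is just a shortcut for the cargo command):
+
+```bash
+# src/lib.rs  -- the contract itself, ending with a `mod test;` declaration
+# src/test.rs -- the #[test] functions; without `mod test;` in lib.rs it is never compiled
+cargo test    # what the inline "Run Test" link runs, give or take flags
+```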
From that lib, right. And it looks like we got some sort of bug in our Gitpod setup. If you run a `cargo clean`, Tyler, and then just try clicking that "Run Test" again. Nice, debug it live. Is this like the most satisfying thing about Gitpod? I mean, it's satisfying anytime to watch anything compile, but this is really satisfying. This thing always gives me anxiety, though, because it's like: yes, I've waited 30 seconds to get to 142, and then there'll be
+
+[28:00] an error, and, like, oh shoot, now I've got to fix that and then wait another five. Yeah, there's a lot of dependencies. Because what this test is doing: there's only, what, like five, six lines of code here, but that `Env::default()` that's at the beginning of the test is actually the Soroban environment, and it's not a mock environment; it's the actual Soroban environment that gets embedded in the Stellar Core nodes, and we're using it here in the test. So when you're writing Rust tests here in your contract, you are actually testing your contract in much the same way that it will be running on chain. So you have all the same things available to you; you can even do things like integration tests across other contracts. So you can import another contract that you're calling, or maybe several contracts you're calling, and you can write tests using your contract and all of those contracts together, and, you know, see for real how it's actually going to behave. Well, that's so cool. That's one
+
+[29:00] of the advantages of having the entire thing be written in Rust: you get this Rust environment that can be pulled out of actually running in Core, but it's the actual environment that is running in Core. Exactly. Yay, our test passed! All of that for one test. And it needs to be, like, capital "Ok"; I found this funny, it's like "Ok" with an exclamation, but that could also mean it's not okay. Haha. Anyways, Lee, there's a fun question, which is: is there a smart contract to find out what Lee's Halloween costume will be this year? Yeah, I'm gonna dress up as a hello world dev. It's not a bad idea. We're gonna have to have a little bit smarter contract to guess Halloween costumes, right. But Tyler, you wanted to ask Lee some
+
+[30:00] questions, I believe. Yeah. So one of my main questions: even as I'm writing Stellar Quest quests, one of the things I'm gonna have to do is, okay, people are doing their quests; they do their hello world. One of the first quests, spoiler alert, might be to call hello world. I know I can find that they did that, but I don't know how to prove, from this, that they actually submitted "hello world", and I'm guessing it's something to do with one of these parameters, maybe something with the footprint. Can you walk us through: am I going to be able to build a Stellar Quest off of the data that I'm getting inside of the operations, to check if what someone submitted was what I expected? Because this XDR is not immediately helpful to me. And then, secondary to that: what the heck is a footprint? Yeah, these are good questions. Can you
+
+[31:00] zoom in a little bit? Sorry, it's pretty small for me. Yeah, we've got three fields here, and you're right, they don't really tell us a whole lot on the surface. There are two tools we can use to understand what this mumbo jumbo of words is: we can either use the Laboratory, whenever you've got it in futurenet mode, so
if you want to open that up in another tab, we can try putting that into the Laboratory; or you can also use the Soroban CLI, which has a subcommand where you can just paste this and it will print out what the value is. But maybe we can use the Laboratory, since a lot of people know it. So yeah, this is Laboratory right here. Do you want me to copy one of these XDR values over to the View XDR page? Yes. Okay, so I just copied this one, with type "object". Yeah, so what you want to do is grab the XDR type; it should be ScVal. If you look
+
+[32:00] for ScVal, it'll be down there, deep in the... and, okay, it's not actually rendering something that's super useful here. Well, maybe that's base64. Yeah, base64-decode that. Does this convert to... okay, wait... oh no, what is this value? We should have started with the second one. I don't remember; okay, we can do the second one first. I'm gonna try to see if I can remember what the first one is and why we can't decode it. Oh, the first one's the contract ID. Oh, so it's a hash, of the contract, or the contract function, or something like that. So let
+
+[33:00] me run my super powerful other VM, called RunKit, and do `Buffer.from(..., 'base64')` and then `.toString(...)`; it's probably hex. That's our contract ID. Maybe... ish... wait, no: that was the actual XDR value, this was the base64-decoded one. So that's our contract hash right there. All right, so that's one problem solved. Now we need this one. Yes, let's have a look at what that is. View XDR,
+
+[34:00] also an ScVal; this is also an ScVal, so if you copy that value and stick it into the base64 decoder, it should come out as a string. All right, without the hex... "hello". Sick! All right, wait, that was our first argument, right, by default? Oh, that was the output? No, that's the input; that's the function name. The first parameter was the contract ID, the second parameter was the function we're calling, and then all the parameters after that should be the arguments. Fingers crossed, everybody... wait, I've got to turn it out of the XDR into base64... base64... will it say "world"? I can write my checks!
+
+[35:00] Haha. Everyone: you'll have to actually do what the challenge says, or you're busted. Amazing. And, when I look at View XDR, this thing just got absolutely enormous: are there many other things that people will need to be aware of, or does ScVal kind of cover most of our ground here? ScVal covers any value that's being used as, like, an input or an output of a contract function, as well as anything that's being stored. So if you're storing something on chain, some data to look up again later, maybe balances, things like that, they're all ultimately going to be stored inside an ScVal. And ScVal is really a union of lots of other types: here we can see this ScVal is a symbol, and there are integers, byte arrays, maps, vectors, bigints; they all fall under that ScVal.
+
+[36:00] I spent too long trying to figure out how to decode this, and I just had to get on stream, that's all I had to do. That makes a ton of sense. All right, fantastic. Now I know how to bust everybody on the arguments and make sure they actually submit the right thing. Next: what the heck is a footprint? This is also XDR. I really hope it has something to do with Bigfoot, but I'm guessing it doesn't.
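+
+Before moving on to footprints: the decoding dance above, compressed into two commands. The base64 strings are placeholders, not the actual values from the stream; the first decodes to raw bytes (the contract ID hash, best read as hex), the second to readable text:
+
+```bash
+# Contract ID ScVal: base64 -> hex (what the RunKit Buffer trick did)
+node -e 'console.log(Buffer.from(process.argv[1], "base64").toString("hex"))' "<b64-from-laboratory>"
+
+# Function-name / argument ScVals: base64 -> mostly readable text plus XDR framing bytes
+echo "<b64-from-laboratory>" | base64 -d
+```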
You can try decoding that if you want; I don't remember what the type is exactly, it's probably LedgerFootprint. There's a contract ID, and this is not necessarily particularly interesting for this specific contract, because this contract doesn't access any data or anything like that. So, what a footprint is: it's us telling the runtime what data the contract may write to or
+
+[37:00] read from. And the reason this exists is because we want the runtime to know what data the contract will need to be accessing, because that helps us do things like run contracts concurrently, at the same time. So we're building Soroban with the advantage that we've seen other blockchains already build smart contract runtimes, and we know that scaling blockchains is difficult, and so we're planning for that from the get-go with Soroban. Part of the idea of the footprint is that when you invoke a contract, you define what data might need to be accessed by the contract, and then the runtime knows how to parallelize contracts that don't need to access or write to the same data. So, in this contract invocation, if we looked at the code, you'd say it's not accessing any data. So why is
+
+[38:00] there one entry here? There's one read-only entry, the contract data, and the key is not particularly descriptive: it's a static value, and what that is, is the contract code itself. So we're saying that, with this contract invocation, the only contract data it needs to read or write is the actual contract code that gets loaded into the runtime. But if you had a contract where you were updating a balance, you would need to include the ledger key in this footprint for the balance that was planned to be updated. I think that's making sense. So, yeah, there's a question here: what is the footprint of a function that uses the environment's invoke-contract call? All right, yeah, that's a good question. It'll be the footprint of whatever that other contract requires. So, at the
+
+[39:00] very least, it's going to require the contract code, so there'll be another entry like what we see here on screen, except the contract ID won't be the calling contract, it will be the contract that's going to be called. And then, if that contract needs to go and access any data, you'd also need to be predetermining what data is going to be accessed and including that in the footprint. Is there a limit to the depth that can go to before it becomes difficult? I mean, there will be limits for the runtime, especially once it gets to pubnet; I think we're still figuring out a lot of where those levers should be set. For futurenet (I mean, futurenet doesn't have fees enabled either), these are things which we're building out, and they'll come in future futurenet iterations. Yeah. I think a question that maybe arises from this is, you know, I said
+
+[40:00] that the footprint is something that you have to predetermine: you, as the person building the transaction, have to go and figure out what the footprint should be, and that can maybe be a bit of a daunting task. And so we have this thing called preflight, or simulation, where you can submit a transaction without the footprint (no footprint required), and it's not executed on chain, it's just simulated, and then it basically gives you back the footprint.
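+
+For concreteness, the footprint being walked through decodes to roughly this shape: one read-only entry naming the contract's own code, nothing writable. The field names only approximate the pre-1.0 LedgerFootprint XDR:
+
+```bash
+# Illustrative rendering of the hello-world invocation's footprint
+cat <<'EOF'
+readOnly:
+  - contractData:
+      contractId: <our contract id>
+      key: <static value meaning "the contract code itself">
+readWrite: []   # a balance-updating contract would list that balance's ledger key here
+EOF
+```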
So you can then just go and submit the transaction with that footprint, so you don't really have to determine it yourself. I know when I was looking at the network requests inside of Paul's example dApp, there were loads of those preflights whenever something was happening, to go and find what the actual submission call should look like. And it's also the way that you do read requests: if you don't actually need to submit something to the network, you just need to get data out of it, you can use those preflight requests to grab
+
+[41:00] information from the ledger, from the Horizon endpoint, without actually having to submit the transaction, which I thought was kind of cool. There are a few other questions here. One says: why have one type of ScVal for everything, architecture-wise? That's a good question. I think the values get used in a lot of places, like as function inputs and function outputs, and we really need one type that's going to contain all those other types. Yeah, that flavor of design really comes from the way that the Stellar network uses XDR for structuring all of the data that gets stored; that's just a really common way of solving that problem with XDR.
+
+[42:00] I'm trying to see... there are a few other questions here, so I guess we can shift a little bit. One says: how should one think when writing Soroban contracts so as to not muck things up? Keep it small, and, apparently, write good tests, because I made it a long way, and then I had to write a test and it broke. So: write good tests, start out really small, and, I don't know, have fun. Right now it's on futurenet, so play around, learn what you're doing. But yeah, Nisha, you'll figure it out, I believe in you. Do the Stellar Quest, that'll go a long way. The Soroban Quest, you mean. The Stellar Quest: Soroban Quest version series. I do have another question, and this is particularly coming from a JavaScript engineer's perspective:
+
+[43:00] is there a way to, like, watch contracts, so that as you make changes it kind of runs tests and compiles as you save files? Or is that something where you have to always be building, always be testing? Is the idea that you just kind of click "Run Test" every time you update some code: you fix your tests, you write your code, you run tests again? What does the development flow tend to be when making changes? Yeah, I mean, there are a couple of options here; it really comes down to, you know, what you like to do. If you're a VS Code user (my shortcut keys are customized, so I won't say what it is, because I don't know what the default is), there's a shortcut you can press to run the last test, or run the test under your current cursor. And so a common development flow is you just keep hitting that key after you've made some
+
+[44:00] changes, and it'll go run the tests you care about in that moment. There's another great tool called cargo-watch, which may actually be installed in this Gitpod; I think I saw it was installed in the Dockerfile. And if you use cargo-watch, by default it probably just builds, and that may not be what we want, but I think it's `cargo watch -x test`, and that will just run the tests every time you change the files. Cool. Yeah, I think by default it just builds and then tests.
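+
+cargo-watch is a real crates.io tool, and `-x test` matches its documented usage; whether it was preinstalled in this particular Gitpod image is only inferred from the conversation:
+
+```bash
+cargo install cargo-watch   # once
+cargo watch -x test         # re-runs `cargo test` on every file change; -x picks the subcommand
+```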
Yeah, that makes sense, and that will definitely help. It's just a very foreign environment (not a poor one, just a very foreign environment) for a JavaScript developer, so knowing what that narrative is like, for actively working on a contract, is an important one.
+
+[45:00] That was just a link that I wanted to share... well, I guess Kanye West wanted to share it. Yeah, if there are Rust developers in the chat that have ways that they run these: (a) submit them to the Sorobanathon, because I need to know how to build these faster, and (b) put those in chat, with links and such. Yeah, it's one of the things I love about the Rust community: it's very aware of how hard it is to learn Rust, and so people are very eager to share all the things that they do, and the tips and tricks they have, for getting up to speed and actually building contracts and stuff. Yes, so you can write unit tests, but you can also write integration tests, in the sense that if your contract is using other contracts, you can download those contracts. At the moment you just have to manually download them as a .wasm file; in
+
+[46:00] the future, hopefully, we'll have some better mechanics around that, where they can just be automatically downloaded. But you import that wasm file, and you can register that contract in your test, so it's there, registered in the environment, just as if it had been deployed on chain, and then your contract, when you're running its tests, can be calling through to it. So you don't have to set up mocks; you don't have to set up these fake tests where your contract is interacting with all these other things. And, while we don't have this implemented today, the environment can have data preloaded into it, that environment that we saw in the tests. And so we do want to build tooling to do things like take data that's on chain: you'd basically take a specific ledger, and then you could write tests that use contracts that have actually been deployed, as well as data
+
+[47:00] that actually existed on the ledger at that point in time. So, there's another question that came in through the form earlier, which says: when you execute a contract, who provides the computing power to process the function? Normally, when I write a function and run it, it's my PC, or a server I arrange, that's burning some CPU cycles; how does that work with Soroban? Is the question, like, during testing, or do you mean when the contract is actually running? I feel like they're probably referring to when the contract is running. Yeah: when Tyler deployed the contract on chain, he uploaded the code (the compiled version of the contract) onto the chain, and then, when he invoked it, he was telling the chain: go and run this code. So the code is running on all of the nodes of the network, and the nodes are all agreeing on: this
+
+[48:00] is the outcome of running that. Got it: decentralization. There's one more question that came in here, which asks whether Soroban has adopted zero-knowledge proofs, and what Soroban supports there. Yeah, I think we're very interested in supporting different crypto primitives, and if you go and have a look in the Discord, there have been some conversations around what crypto primitives we should be supporting, and how we should go about supporting things like that. Right
now, the only crypto primitives implemented and available in the Soroban SDK are just the really basic ones that you would typically see in the Stellar ecosystem today, things like SHA-256 and ed25519 keys. But
+
+[49:00] we're definitely very interested in finding out what people want or see that they need, and we have ideas about what can be added, and we'll definitely be looking into that. Yep. Let's see... I think that is about all the questions that there are. Anyone else have any final questions here? I'd be curious: as you have a really broad knowledge of Soroban and where it's going, and you're working with internal and external teams on roadmaps, are there things you'd really like to see people work on or provide feedback for? What's kind of the most exciting thing that comes across your desk as far as community engagement, or things that people are doing, or maybe areas of opportunity that you feel people aren't quite recognizing yet, that you'd like to ensure people are aware of? What would you like to see come through Sorobanathon? Yeah, I think definitely the big-ticket thing is writing contracts: getting more
+
+[50:00] people writing contracts, providing feedback around what they've found convenient or inconvenient, around what they found conceptually made sense; you know, they wanted to do X, and the SDK helped them to do X, or didn't help them to do that, or got in their way. I think that's probably the biggest thing: dogfooding, writing contracts and discovering what's missing, what the gaps are, all that sort of stuff. That's what really interests me, and I've seen a lot of good posts on this (sorry, I can't even say this) Sorobanathon, that have been very interesting: people exploring how authentication works, and providing examples of where it works really well and where it's got some rough edges. And that's fantastic, because that gives us all, as a community, the opportunity to
+
+[51:00] figure out what needs to change between now (where we're at right now, futurenet version one) and getting to testnet and pubnet down the road. Yeah, absolutely. We actually had that exact example come through this week, I believe. There you go; wow, Raph is already on it, sharing the links to it. But yeah, definitely: the whole reason for Sorobanathon to exist is simply to help with the development, and with dogfooding Soroban with the ecosystem, and then to let our dev team know how they can actually take that and improve Soroban. We're listening, basically. I see another question here (oh yeah, I think Kanye, you have the right answer there), which is: would SCF allow, in the next round, Soroban-related projects? I would say yes, probably, but don't quote me on that. But yeah, I'd say we'll support them
+
+[52:00] once it hits mainnet, because the point of SCF is to run live businesses, kind of in production, and... please don't run a production-level business on a testnet or a futurenet. So be working on your business ideas now; you can't just spin up a business in a month (well, some of you probably could), so get started, get ready, so that whenever there is an SCF round that is accepting projects
that are built on top of Sorobanathon... Soroban! Wow, it's not called Soroban anymore, now it's just Sorobanathon; how the tables turn. Then we'll be ready to accept those inbounds. But it's definitely coming; we just don't want to encourage people building production businesses on top of test networks. That said, and since I consider this a very casual conversation with our community: we're considering maybe doing a hackathon in the future, and by
+
+[53:00] future I mean, like, next month or the month after, where basically it would be for you to come up with those ideas: you could submit proposals for Soroban-related ideas, and do example contracts, and so on and so forth. So we really don't want to stop you from ideating at the moment; if you have ideas, please start working on them, because there will come a time when we want those ideas brought to us from the community, and we want to support you the best way we can. So, yeah. And I'd say this Sorobanathon, "Sorobanathon: First Light", won't be the last; we're going to run more, right, and the next one might be "Sorobanathon: Super Tools" or something, where we're coming up with a new theme, a new funding round, looking for a different type of content, and one of those might be some of the tooling that we really want to see on the network. That sort of thing opens up as the platform continues to mature and change less:
+
+[54:00] it'll become safer, I guess, to begin writing more of these mature, production-ready contracts. But we've got to be careful in the sequencing of things, so that we don't write contracts that are just going to be constantly breaking, or kind of create an environment that disincentivizes breaking things in order to improve the final product. So you want to keep things a little malleable in the early stages, so that it's fine to constantly be breaking things, because nobody's built anything too mature yet; pre-pubnet is the time for that, is the takeaway here, I guess. But yeah, I think that is everything for this week, y'all. Once again: Sorobanathon is still a thing, it's happening, it's going to keep happening, so keep playing and tinkering with Soroban; you can literally earn XLM for doing that, I don't know why you aren't. Like I said, I'm always ready to quit my job and start working on Soroban, but I do like my job; I like talking about
+
+[55:00] it. But yeah, anyways, y'all, thank you all for joining. We will be back next week on Friday with Soneso, actually, talking about their AssemblyScript SDK, so stay tuned for more details on that; we're really excited, and that's always a good time to just ask questions in general about Soroban. Well, that's a wrap on Soroban Talks. Yeah, thank you. Thanks a bunch, see ya!
+
diff --git a/meetings/2022-12-12.mdx b/meetings/2022-12-12.mdx new file mode 100644 index 0000000000..feaacdccef --- /dev/null +++ b/meetings/2022-12-12.mdx @@ -0,0 +1,96 @@ +--- +title: "Get to Know Ginger Baker" +description: "Denelle Dixon sits down with Ginger Baker, the SDF’s newest board member, to explore her career path and leadership lessons—from building financial access products across big tech, traditional finance, and startups to practical advice on communication, hiring, and staying grounded in real-world user needs." +authors: [denelle-dixon, ginger-baker] +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Denelle Dixon introduces Ginger Baker as the Stellar Development Foundation’s newest board member and frames the conversation around Ginger’s career journey, what motivates her work, and where she sees the industry heading. + +Ginger shares how her work has consistently centered on the intersection of technology and economic empowerment, with a focus on financial inclusion—especially the practical infrastructure needed to help people move money and access services in everyday life. + +### Key Topics + +- Ginger’s background and leadership path, including how she “stumbled into” leadership by following curiosity and caring deeply about the problems she worked on +- A career thread of financial inclusion across roles at Visa, Facebook, Square, Ripple, and now as Head of Financial Access at Plaid (portable financial data access) +- Communication as a learned leadership skill: adapting tactics to be heard, including visualizing ideas (e.g., whiteboarding) when discussion dynamics get challenging +- Lessons on leadership as a woman: balancing assertiveness and likability, and prioritizing respect over being liked +- Learning from failures in real time: pivoting when projects go off track, including adapting a Rwanda initiative from a single product goal into a broader public-private partnership approach +- Advice for builders and professionals: stay curious, ask questions relentlessly, test assumptions, and “blossom where you’re planted” by showing up with excellence regardless of role +- The importance of team culture and fun as a performance multiplier, including simple rituals that help teams connect even when remote +- Why expanding financial access for women matters: research-backed outcomes and firsthand observations of women managing complex informal financial systems and operating key cash-in/cash-out endpoints +- A hard recurring ecosystem challenge: making on-ramps/off-ramps work (incentives, retail endpoints, agent networks), and why solving this is central to shifting from cash-heavy environments +- Product strategy perspective: start with a clearly defined segment and real-world problem, then scale by identifying repeated patterns across ecosystem needs +- MoneyGram integration use cases: combining Stellar’s global movement rails with MoneyGram’s trusted physical network and local community presence to improve access without requiring bank accounts or cards +- Closing reflection on staying grounded while aiming high: keeping “feet on the ground” while continuing to reach for ambitious outcomes + +
+ Video Transcript
+
+[00:00] For those of you who don't know me: my name is Denelle Dixon and I'm the CEO and executive director of the Stellar Development Foundation, also known as SDF. The Stellar Development Foundation is a non-profit that supports the growth of the Stellar network, which is an open-source public blockchain network, and of its ecosystem. I'm really excited to be here today and introduce you to Ginger Baker; she's SDF's newest board member. Today we're going to learn a little bit about Ginger and her career, her journey, what drew her to SDF's work, and the direction that she sees for the industry and where our industry is headed. Hello, Ginger. Hi, Denelle, thanks so much for having me. It's fun to be here. So it's so great to have you, and to kick us off, I was hoping that you could tell us just a little bit about yourself. Sure. So I'm Ginger Baker. I live in Northern California. I grew up in various parts of the US, but I do consider myself a global citizen, hopefully, or working towards that through some of the work I've done. I
+
+[01:00] have two kids and a wonderful husband and a brand new dog, so those are probably the great facts of life about me. How's the brand new dog doing? I'm sorry, what was that? I said, how's the brand new dog doing? He's great. He's still very much a puppy, so less than a year, but he's fun, he's fuzzy. He's like the fuzziest dog; he's almost like a stuffed animal with all the fur. So we're enjoying him very much. Yeah, so that's a little bit about me. Also, I'm my son's basketball coach, which is a very big deal for me, because I'm a huge basketball fan and I've played my entire life. So I'm the coach of the middle school basketball team right now, which is definitely a favorite thing for me each year. And how about your career? Yeah, so my career has really centered around that intersection between technology and how it can be applied to economic empowerment. So if you think about that intersection, the topic of financial inclusion has really been the thread that's tied almost all of
+
+[02:00] my experiences and roles together. So I've worked on financial-inclusion-related products or ecosystems or relationships, or whatever it happens to be, at big tech (I worked at Facebook), at more traditional financial services companies like Visa, and at startups like Ripple and Square, and I'm now the head of financial access at Plaid. I've been really excited about my career; taking the approach of looking at the problem space, or the opportunity space, from a lot of different frames has been fun. I also tend to dive in deep into a particular market or segment for a particular period of time, and then pull back to look at things more from a macro standpoint. So I spent a lot of time with Visa in Rwanda, and on financial services in Messenger in the Philippines. And so, yeah, my role at Plaid is to build the tech and the relationships that help people to have access to completely
+
+[03:00] portable financial data, so that you can take the financial data that sits in your bank account, or wherever it resides, and move that data to another application or service, to avail yourself of the value of that service. Well, we found Ginger because we thought her background was just fascinating, and we thought it really connected so well with the focus areas for SDF.
But, honestly, Ginger and I really bonded over this whole middle school basketball thing, and we know how challenging that is. So I just love the fact that you're the coach, and I know how hard that is on so many levels. First of all, I'd want to have fun playing on your team, but also, just to be the coach of middle school boys... To be clear, I think if our kids actually played against each other, we might not be in this relationship from a career standpoint, because I don't think we could; it would be fierce, it would be too competitive. I'd love to dig a bit deeper into your career journey. So, was stepping into leadership roles like the one you have
+
+[04:00] now, and the ones you've had before, sort of always a part of your plan? Interesting question: part of my plan? No. I definitely would say being a leader has generally been something I've stumbled into. I've been a leader for a good chunk of my life, I mean, even as a child; in high school, for example, I was the president of our class. But it wasn't because I intentionally wanted to be that; there was just something I was interested in, or a topic I wanted to pursue, or something I was curious about, and I wanted to go and do it. So I guess it wasn't so much a strategy as good luck, perhaps: being in the right place at the right time, and just caring enough to want to lead a group of people through something. And I think from a career standpoint it's been fairly similar where, again, this intersection of technology and economic development: I am just really fascinated
+
+[05:00] with it, and I always have been, and so I have a lot of passion for that topic, and I will apply that passion and try to bring people along with me to go explore something new or take a look at something. And again, I think being in the right place at the right time has often been a part of getting into leadership roles: like, at the forefront of a new technological advancement, such as the proliferation of mobile devices, or some kind of social change, such as the adoption of messaging services becoming such a central part of people's lives through WhatsApp and Messenger. So it's been a lot of luck, but also because I really care about the topics I work on, and I try to get people to come along with me as we explore those opportunities. I can't imagine that it's always been easy for you to step up into these different roles. Do you have a particular memory of something that was challenging, that made you even better, or that caused you to think about things differently? Yeah, I think communication has been a big piece of my evolution,
+
+[06:00] and I remember times when I felt like I couldn't quite get my point across, maybe sitting in a room with other leaders, and I think the intersection of, sometimes, the male-female dynamic has always been a little bit interesting in my career. There have certainly been moments when I felt like I couldn't quite get my voice to be strong enough, and, as my team will attest, I love to draw on whiteboards; I think this was actually a tool I developed because I couldn't get people to listen in a way where I felt I was being well understood or given a proper forum. So I would say something, and I didn't feel like people were listening; people were talking over me, or my point of view wasn't coming across, and I would just stand up and draw it out.
That doesn't always work, because I'm not great at drawing, nor do I have good handwriting, but sometimes just documenting the idea would help to get it through. So communication tactics, and they have to be
+
+[07:00] different in every situation, have been something I've continued to try to pay attention to and evolve, based on the situation and the group of people I'm with. You just mentioned that maybe it was the male-female dynamic, or just the fact that you were a woman in a leadership role. Tell us more about the challenges, or the misnomers, or maybe the benefits of being a woman in a leadership position. Yeah, I mean, I think there's this balance you need to strike as a female leader between being assertive and being likable, and that's not always easy. I think early in my career I really over-indexed on the likability, like, if people like me, this will go well, and I was a little nervous about being too assertive, I think also because the expectation was that I wouldn't be assertive, and so it felt like overriding expectations that folks may have had. But I had this great mentor, still
+
+[08:00] do: Sarah Friar. She was the CFO at Square when I worked there, and she's now the CEO of Nextdoor, and I remember she told me one time, she just looked at me really directly: it doesn't matter if they like you, you just need to make sure they respect you, right? And so, what is the combination of how you need to exist that drives respect over likability? And I think for women, unfortunately, or maybe fortunately, that likability component is an important part of what we do and how we exist in the world, and we even carry our own biases around that too. But having somebody say that so directly to me ("you're over-indexing on likability; make sure you're investing in respect") was a really good wake-up moment for me. Oh, I think that is such a great piece of advice, one that I didn't get early enough in my career, and I definitely over-rotated on the likability. I've got to be honest, sometimes I still do. Sometimes I see out there the social media responses to things we do or we say, and I'm like, really? So I definitely feel like it's a journey that we all, probably
+
+[09:00] not just women, can benefit from that kind of advice on. Sometimes I think that there's more that we can learn from the failures that we've had than our successes. I always tell my boys this: every failure is a chance for you to do something differently next time. So, anything that you'd share along those lines, and what you've learned in those processes? Yeah, I mean, I've made a lot of mistakes, or, you know, failed quite a bit. I failed early at hiring; I did not hire well early in my career. And I think getting the right people on your team is super important, and it's not something where you're going to wake up in the morning one day and be like, I'm going to become a manager and I'm going to be really great at hiring. You really have to work at it. You have to understand what makes great team members and how to hire correctly. But not all failures are final: I mean, if you're watching the evolution of something that you're working on that's sort of starting to go off track, you can turn that failure into opportunities.
Certainly,
+
+[10:00] when I was working for Visa in Rwanda, we went there with a singular intention, which was to launch Visa's first branchless banking product. So this was a product that existed only on mobile devices and did not have a plastic card associated with it, and this was back in 2011, when I moved to Kigali. So this was a very interesting idea at the time, and mobile money was starting to take off in East Africa. But it started with a very singular product proposition: we're going to go and do this product. As we began to hit different challenges and roadblocks in the evolution of that product, we began to understand that, actually, it was because the environment there, and the government that we were working with in Rwanda, needed more from us than just that singular product. What they wanted was a more full and robust public-private partnership between Visa and the government of Rwanda. And so we noticed that things were not going as we had hoped, but instead of bailing or just bowing
+
+[11:00] out completely, we really tried to dig in deep to understand what we could do differently that would open up doors, and then we modified what we were doing. We still launched that product, which is called mVisa, which was great. But probably the best thing that came out of that experience was just: how do you constructively engage with a national government as a Fortune 500 company, especially on a continent where there wasn't as much experience from the work that Visa had done in the past. So, anyway, I think there are ways that you can take these failures and, yes, learn from them after the fact, but you can also take the failure in real time and pivot it to turn it into a more interesting opportunity than you were even expecting. Yeah, I love that. You know, one of the things that I always really like to have are these goals that we set out, whether they're product goals or goals for us internally. And the thing about a goal is that you might actually pivot along the way, and you might not achieve the exact thing that you set out to, and you need to be willing to make those pivots and to learn from what's going on in the context that you're in. So I do think that it's really
+
And I think that phrase of like Blossom while you're planted is really important, especially early in your career. I think the opportunities that I have gained or like been I've been available to me and even still today that are available to me actually come a lot from the people that I interacted with in my early 20s and I think I mean that was a long time ago- don't need to say how many years. But you know, I showed up hungry and motivated and I really cared, like I really cared about our outcomes, even if the role I was in was not actually what I thought I wanted to do, or the thing that I was working on actually didn't actually have that much personal interest in it. You know just how you show up can really matter. I think the other thing is like we should have fun. You know life is too short and you should have a good time with what you do, and I think that's really important. It helps with motivation, it helps with Team camaraderie. + +[14:00] When I was at Facebook, we had a great team called mobile financial services at Facebook- one of the best teams I've worked with- and we did some really amazing stuff. You know, we increased Financial inclusion in the Philippines. We created this great product that existed in Emerging Markets where people could link accounts or create Financial accounts and use it to send money or pay bills or buy airtime, like there's a lot of good stuff that came from that product. But I think the best thing that we did was we had fun as a team, like we had a cowbell. that we used on Friday afternoon meetings. We had a CAP mascot like I don't know, we just we had a great time. And when people have fun like they want to show up, they want to get engaged, they are, you know, they're just naturally more productive. So those would be a couple of thoughts. I think that is such great advice. I think the have fun one- it gets lost- One of the things that I decided early on, especially once I had kids, is that, since I didn't have the choice about whether to work or not work, I needed to work- I was a single mom at the time and I had three boys and my focus was that I needed to really be in a role and to be doing work that inspired me, that constantly + +[15:00] Challenged me and that I liked doing with people that I liked being with, and it was those choices that really caused, like, all of my decisions about where I was going to work. It wasn't about the big name. It was about. Am I gonna the these people? Are they going to inspire me to even be better than I am? And I think that have fun part is something that we can't lose, and I get a little fearful sometimes in today's world that we're not having enough fun because not all of us are together, right, and I and you know because of all the things that have happened over the last many years, but I think that is such a crucial thing. So have you been able to do that at all of the roles that you've been in? So, probably not all of them, but I do try- like now, in fact, I'm looking to see if I have one of the props around we have like a hat thing at plaid- oh, I do have one. Well, here's my take credit hat. Like we're constantly wearing hats and I don't know how we came up with that. 
It was just this random moment that happened where, all of a sudden, it made sense for us to put on weird hats in a meeting, and we've been doing it for
+
+[16:00] two and a half years, and it's easy for us to express ourselves that way, because you can see it visually from a virtual distance; you don't have to be together to have that joke or funny thing that you do, it's something that we could do remotely. But again, we didn't plan that; it's just something that you stumble upon when you really want to connect with your team. And when you're having fun, you have to be in an environment where you feel comfortable randomly putting a hat on your head and making a joke about it, and feeling like people are going to accept it, that you're being authentic. It's just a natural part of existing as a team together, that ability to play. I just wanted to point out that if you have questions, go ahead and put them in the comments, and I'm going to try to look at those as I'm asking questions as I go through here. So, I know you started your career focused on financial services in emerging markets, and you told us a little bit about that. In particular, you're a big advocate for the impact that expanding financial access for women in these markets can have. Why is
+
+[17:00] that? Why has that been a focal point for you? Yeah, you know, obviously, or maybe not obviously, there's research that shows that when women are in control of limited budgets, they will traditionally, or typically, invest more in education and health and things that are going to help their children to prosper. And I think you can tie that to not just building strong children and strong families, but then building strong communities, and so I think that there's a research-proven tie between those two things. But it was mostly experiences that I had in trying to build some of these tools and products in markets around the world that just really sat with me. I mean, we were doing research in Bangladesh, this was when I was with Facebook, and we were trying to understand the motivations that someone might have to use a digital financial management tool. And in order to understand what you would
+
+[18:00] potentially build to aid someone in those desires, you really have to understand why they do it, or what it is about their lives that could be improved. And there was this one woman talking about her ability to manage the money for the family, based on the very wide swings of income that came from her husband's job; like, that was her identity: her ability to be creative about how that money was managed, to be able to make it last in seasons where income was low, to save it and hold it back when it was high, and to do that with a very intricate web of very informal systems, everything from storing it in a container in the kitchen, to giving some to her sister, to hiding it in places, to stitching it into clothes, to buying jewelry because that would last longer and hold value. I mean, it was just amazing what this woman was holding in her head in terms of all of the things that she was doing to have the best financial outcome for her
+
+[19:00] family. And so, not just that experience, but also the money agents that we worked with in the Philippines.
You know, more than 50 percent of the agent network we were working with were women-owned businesses, and they were just all business. I was so impressed with them, right? And so, finding ways that you can empower women in these roles- not only as the leader of a family, but also as the leader of a small business- and having more opportunities, you know, to be part of these networks that we're investing in: on-ramps and off-ramps for fiat currency to digital currency. Like, you know, these are the places that are at the forefront of technical innovation, and having women be a part of that, I think, will not only be good for them and for the products that we're all building, but also for the communities in which they live. It's one of those things that the research that has been done- I think by the World Bank- has talked about: the fact that when women are engaged in financial infrastructure and are able to use those tools, the GDP of the country goes up. And it's one of the reasons why I am so deeply + +[20:00] Focused on it. When I was at Mozilla, we were building the Firefox operating system for mobile devices- it was for low-end mobile devices that could be used anywhere, and it was built on HTML5, so we were, like, very interested in the open part. But one of the things I loved, in partnership with these different carriers, was the fact that we could look at local problems. And one of the problems was that women were trying to make money, and one of the things they could do is sell one of the extra meals that they had that night, and how that would just improve things for them. And so giving them the tools to do that just changes the whole dynamic for the family. So I, too, am just so enamored with that and really, truly want to help there. And I'm so grateful for the work that you've done, and certainly one of the things that we've talked about is those on- and off-ramps, and the work that you've done historically in Africa and all over really helps us to be able to have the sounding board from you, to talk through these really important issues and to hopefully get them right. Solving these local problems, yeah. And if you think about- I made a flip comment about, you know, + +[21:00] Moving cash to digital, you know, digital fiat to crypto- there's a whole host of challenges that have to be solved for. And you asked a question earlier about failures. I don't know if I want to put it in the failure column, but I think a very large learning from some of my experiences is: how do you properly incentivize and motivate those retail outlets or those endpoints, many of which are humans running small retail shops, some of which are also digital? Like, how do you get the on-ramps and off-ramps solved if you really want to move out of cash-based economies and into a digital world? To me, that was the hardest part. It's been the hardest part in three different companies and three different products I've worked on- that piece is the hardest. And, yeah, a lot of those endpoints are women, and so I just think it's a really interesting problem that we're going to have to continue to keep solving. So there's a question here that may be a little bit too specific, but I'm going to go ahead and grab it. Soroban, if you + +[22:00] Remember, is our smart contracting platform that we're building, and I'm going to ask you about it.
One of the questions is: what is the biggest underserved area in fintech that you feel Stellar and Soroban, or just Stellar, could really help fill over the next 36 to 48 months? Yeah, I think it's a great question, and I'm gonna- it's not a cop-out answer, but I really mean this- which is: what is the place, the segment, the people that you're trying to serve, and what is it that they need in order to do the thing that they want to do, take the action they want to take, have a better livelihood? You really have to hone that down to something very specific and small to get started. So, thinking about all these broad industry problems- I've always struggled to be successful if I take too broad of a mindset or too broad of a view on things. And where I felt more confident that we were building the right things or investing in the right kind of technology + +[23:00] Was when you could get very pointed about: you know, this is ultimately the problem we want to solve on a global basis, but we're going to start very locally. And so, who's the segment that you're trying to serve? Is it artists? Is it low-income women? Is it online businesses and e-commerce? What's the thing that you're trying to solve? Who are your customers? And then work your way into the technology that needs to be built from that real-world problem. And, I mean, making money work better for people and making it more fluid is what Stellar is all about, and I think that can apply to so many different situations. I love the model that Denelle and team have of identifying and finding, you know, businesses and entrepreneurs who are building in this ecosystem based on real-world problems, and I suspect that what gets built and what those problems are is going to be fairly diverse, right? And so the trick is then to pull up and find the similarities and, you know, the patterns across those things. There's so much noise out there, though, and there's a focus on the hype of crypto and + +[24:00] Blockchain- like the trading use cases, the speculation. So much of our attention over the last month has been on the awful failure of FTX and how that harms so many people. So we often try instead to get people to see the value it brings to the financial system. Do you want to see our industry focus somewhere else, or how do you think about it? Meaning, like, on people versus not? Yeah, you know, we're trying to focus on these real-world problems, as you just mentioned, and we're really trying to focus there, but is there something else, another way to attack that? Okay, so I think you have to base it there. But you can also think about the systems that are required to eliminate the pain points- that, again, once you pull up and you look across those patterns: where do the pain points exist that are similar across different types of services? I'm going to cheat and just use Plaid for a second, which, like, you know, quickly realized that people, individuals, again + +[25:00] Wanted to tie their financial data or move their financial data from one particular place to another. But they didn't learn that by talking to people. They had customers and businesses that were trying to build great products and services for people, and they realized many of them had the same need. And so it doesn't have to be the person that you start with. It can be an ecosystem pain point.
It can be a group of customers who are telling you over and over again that they have something that they want to solve. This happened- again, cheating a little bit here with Plaid- where, you know, we were hearing that people wanted to be able to see their crypto holdings alongside holdings that they have in other types of investment accounts, savings accounts, checking accounts and even loans. So that was a pretty specific pattern we were seeing across a number of different customers who were focused on building wealth management solutions. And so what did we do? Well, let's go build integrations into digital asset exchanges and other crypto companies so people can see the balances of those accounts. + +[26:00] But there wasn't a lot of talking to the individual person about that. It was about seeing the system and the pattern of the request across a number of different customers that were valuable to us. Yeah, using data to sort of help drive the solution as well. Another question from the audience is: what is your perspective on the most attractive use cases that can be achieved working with MoneyGram, in the MoneyGram integration? Yeah, so I think that's an interesting one. I mean, I think there are, like, two areas with an organization like MoneyGram that are probably interesting to look at. One plays right into what we were talking about earlier about the agent network. I mean, they have such a strong, visible and reputable, you know, money movement network that local communities understand and see, and I think you cannot dismiss the value of that- it is a very hard thing for a company to build. MoneyGram's been doing this for, you know, decades, right? But if you take new tech companies and you think about their relevance to physical cash locations in local communities + +[27:00] Globally, that is a very hard thread, you know, to tie, because, one, that company may not be known- certainly their offerings and financial services aren't going to be known- so the amount of marketing dollars and communication that have to go into building brands in local communities is really challenging, and MoneyGram has that in spades. And so I think the combination of Stellar's technology and the ability to more easily move funds around the world, in addition to MoneyGram's brand and prominence in local communities, is a fantastic offering. Yeah, Alex, the chairman and CEO of MoneyGram, says they've been getting disrupted for, like, the last 50 years or something like that. It's this joke that he makes across the board, and the great thing is that this isn't about disruption as much as it is about really enhancing that traditional financial infrastructure and leveraging the value they've built over all of these years and really bringing that to blockchain, so that you don't need a credit card and you don't need a bank account, + +[28:00] Which a lot of people don't have outside the U.S. and outside of Western countries. So it's definitely one of the things I'm most proud that we were able to bring together. All right, I have one last question for you, because I think we only have time for it. So if you had a billboard in Times Square and it could say anything, what would it say? Come on, Ginger, okay. All right, let me think. I mean, something probably super cheesy, like, you know, "be authentic" or something. So when I was a kid- this is gonna date me, okay, so maybe we delete this part of the segment.
When I was a kid, there was this show, American Top 40. There was a guy named Casey Kasem who used to say: keep your feet on the ground and keep reaching for the stars. I think it would be something like that. I think that's how it went, yeah, and that just really sticks in the brain, and I think it's also a great- it's + +[29:00] A great statement, right? You have to have your feet on the ground. You have to be realistic, you have to be grounded in what's going on, grounded in the capabilities of your team, grounded in the capabilities of the technology you have, grounded in the realities of the world and the ecosystem in which you're working, in the partner dynamics. But you can't stop, you know, you can't stop trying to go to the moon. So, anyway, I would just 100% borrow Casey Kasem's. Well, I loved that it actually came on Sundays. You guys out there who never had to experience this part don't understand: we didn't know what was in the top 40 unless we sat there on Sundays and listened to Casey Kasem, and you would tape it. Did you ever do that thing where you would wait for the song? Yeah, then you would press the two buttons on your tape player so you could record it and listen to it later. It was a big deal. It was a really big deal. None of you guys understand how cool it was way back when. Yeah, and now we're talking about Stellar, right? Yeah, things have changed. Things have definitely changed. I am so grateful for the time, thank you so much for joining us. I'm so grateful that you are a part of our Stellar family, and I + +[30:00] Look forward to all the more we'll do together. Thank you, thanks so much for having me, Denelle, great to see you. + +
diff --git a/meetings/2022-12-15.mdx b/meetings/2022-12-15.mdx new file mode 100644 index 0000000000..e3dada2205 --- /dev/null +++ b/meetings/2022-12-15.mdx @@ -0,0 +1,124 @@ +--- +title: "Something BIG is coming!" +description: "Stellar Development Foundation and UNHCR unveil a live blockchain-based aid disbursement system delivering digital cash assistance to internally displaced people in Ukraine, showcasing how Stellar, USDC, wallets, and global cash-out networks can power transparent, accountable humanitarian aid at scale." +authors: + - anna-greenwald + - carmen-hett + - dante-disparte + - denelle-dixon +tags: [community] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +The session announces the launch of a live, production-ready blockchain aid disbursement system built on Stellar and implemented by UNHCR to deliver digital cash assistance to internally displaced people in Ukraine. Speakers explain how months of collaboration, testing, and integration resulted in a solution designed for real-world humanitarian use, not experimentation. + +The discussion focuses on why cash assistance matters, how transparency and accountability are achieved dollar-by-dollar, and how combining blockchain infrastructure with trusted institutions enables fast, portable, and auditable aid delivery—especially in crisis conditions where speed and trust are critical. + +### Key Topics + +- Launch of a live Stellar-based aid disbursement system used by UNHCR for internally displaced people in Ukraine +- Role of cash assistance in humanitarian response: choice, dignity, financial inclusion, and immediate access to local markets +- End-to-end system design: + - UNHCR manages eligibility, enrollment, allocation, and beneficiary protection + - UNICC provides backend technical implementation and reporting integration + - Stellar serves as the blockchain record of truth + - USDC (issued by Circle) provides a stable, one-to-one backed digital dollar + - Vibrant wallet enables non-custodial receipt and storage of aid + - MoneyGram provides global cash-out and bank-integrated off-ramps +- Benefits for aid organizations: + - Rapid bulk disbursements + - Real-time transparency and traceability + - Strong financial reporting and donor accountability +- Importance of stability and portability: + - Beneficiaries can hold value in USDC and choose when and how to cash out + - Funds remain accessible even as people move +- Why Ukraine was a strong pilot environment: + - High digital literacy + - Supportive regulatory context + - Existing digital infrastructure and government collaboration +- Digital dollars as a humanitarian tool: + - Reduced fraud and corruption compared to physical cash + - Instant settlement with internet-level speed + - Auditable flows that strengthen trust +- MoneyGram’s role as a bridge: + - Leveraging decades-built physical and banking networks + - Making digital aid usable in real, local contexts +- Broader industry implications: + - A direct counterexample to skepticism around blockchain utility + - A model for blending traditional finance and blockchain rather than “disruption” +- Preparedness and future scalability: + - Need to pre-position systems before crises + - Adapting solutions to local regulatory and cultural contexts +- Compliance and safeguards: + - UNHCR handles beneficiary determination + - Financial service providers perform KYC at cash-out points + +
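To make the end-to-end design in the key topics above more concrete, here is a minimal, hypothetical sketch of what a bulk USDC disbursement looks like at the Stellar protocol level, using the JavaScript `stellar-sdk`. The secret key, the USDC issuer address and the recipient list are placeholders rather than values from the actual program, and a production system like the one discussed in this session layers enrollment, eligibility, reporting and compliance on top of this primitive.

```ts
import {
  Asset,
  BASE_FEE,
  Keypair,
  Networks,
  Operation,
  Server,
  TransactionBuilder,
} from "stellar-sdk";

// Placeholder values for illustration only.
const server = new Server("https://horizon.stellar.org");
const distributor = Keypair.fromSecret("S...DISBURSING_ORG_SECRET"); // hypothetical
const usdc = new Asset("USDC", "G...USDC_ISSUER"); // hypothetical issuer address

// Each recipient is a non-custodial wallet address with a USDC trustline.
const recipients = [
  { destination: "G...RECIPIENT_ONE", amount: "100.00" },
  { destination: "G...RECIPIENT_TWO", amount: "100.00" },
];

async function disburse() {
  const account = await server.loadAccount(distributor.publicKey());
  const builder = new TransactionBuilder(account, {
    fee: BASE_FEE,
    networkPassphrase: Networks.PUBLIC,
  });
  // One Stellar transaction can carry up to 100 operations, so a single
  // submission can pay many beneficiaries and leaves one auditable record.
  for (const r of recipients) {
    builder.addOperation(
      Operation.payment({
        destination: r.destination,
        asset: usdc,
        amount: r.amount,
      }),
    );
  }
  const tx = builder.setTimeout(60).build();
  tx.sign(distributor);
  return server.submitTransaction(tx);
}
```
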
+ Video Transcript + +[00:00] At the Stellar Development Foundation, when the war in Ukraine started, we realized there was an opportunity for us to make a difference. We had already been working with the Ukrainian government on its digital strategy, and we realized that the vision that we have for payment rails on blockchain- the vision that we've implemented along with a lot of our technology partners- could really help people in need. So over the last 11 months, we pushed hard to make it a reality. We pulled together our incredible technology organizations, that you'll hear from today, to create a system that shows why the Stellar blockchain and its ecosystem combine to make something truly special. We navigated months of testing to make sure that this was worthy of those that we are trying to help, and Carmen Hett, who you'll hear from today, planted the seed that got the whole thing moving. So we are so grateful to her for her vision and innovation and her trust in building together. And all of that brings us to today. + +[01:00] The Stellar blockchain aid disbursement system is live, fully implemented by UNHCR, who is delivering digital cash assistance to internally displaced people- IDPs- in Ukraine. Before we dig into the specifics, I want to share some important context. Cash-based interventions serve as a lifeline to the world's most vulnerable populations. This need is only growing, most notably with the start of the war in Ukraine causing some 7.8 million refugees to flee the country and 6.5 million to become displaced within the country, which pushed the number of forcibly displaced people globally past 103 million for the first time- the highest number of forcibly displaced people in history. This disbursement system that we're announcing today is designed to help these people by serving the aid organizations that deliver aid to them + +[02:00] Directly. To tell us more, I'd like to introduce Carmen Hett, the UNHCR Treasurer, who has been a visionary in spearheading the development of this solution with the SDF since the start of this year. Hi, Carmen. Hello everyone. Yes, indeed, today is a great day. It's a big step, and one of many yet to come. The solution is live, so that's good. It's fully integrated. And I'm also here wearing the finance hat- I'm a finance professional- so we needed to ensure that this solution really meets our high criteria in traceability and accountability, making sure that we can actually report dollar by dollar. So we are confident today that the reporting tool is fully in place and we can deliver in an accountable way. So the solution is live and we have + +[03:00] Basically implemented it now in Kyiv, in Lviv and in Vinnytsia, with more to come. So far, so good. It has absolutely achieved what we wanted to achieve, so we're on a good path here. As I've said many times internally to our team and also, I think, to you, Carmen: you have really pushed us to be better and to make sure that what we delivered was of the top quality. So what role does cash assistance play in UNHCR's support of IDPs and refugees? It obviously plays an incredible role, and it plays a crucial role because it really gives people the choice- because we have to imagine, any of us, if we were to be forcibly displaced, what we would actually need. So we really need to have, you know, financial inclusion as well. So it + +[04:00] Provides and strengthens livelihoods and also economic growth.
It means that people really have that choice of what they need and when they need it, so they have access immediately to the financial ecosystem. So that's really key. And you already touched a bit on the notion of transparency- and putting your finance hat on, what are the benefits to UNHCR of this system that we've built together? Yeah, the benefits are vast and multiple. So, on one side, we have instant access. It provides instant access to the financial ecosystems with, obviously, bulk disbursements, so many beneficiaries can be reached in no time, in an extremely rapid way. There's transparency, so we can basically + +[05:00] Follow the money, follow the data all the time- anytime, anywhere, on any device- so we have full visibility on all transactional interaction. As I said, access is very important: access to money, but access to markets as well, basically for the most vulnerable, to make sure that they can really survive, because we're talking about survival here. Stability, because it's USDC we are using here, so that means the beneficiaries can actually carry the US dollar on the wallet at any time, and it is not exposed to devaluation at this point. So they have a choice as and when they withdraw, partially or fully. And it is portable, so, as people are on the move, they have access to that + +[06:00] Money all the time, as and when they need it. And there's the cash-out- the cash-out options are very important because they need to be contextually relevant, and access to money for survival is crucial through these solutions. So it's not a hypothetical solution, it's a solution that is contextually relevant and is working. Yeah, I love to hear those words. We've worked together for so long to make this happen. You've already touched a bit on the notion of transparency- from a financial management and reporting perspective, is that something that's really important, not just to, obviously, UNHCR, as it's doing its own reporting, but to UNHCR's donors? Yes, of course. So we have high scrutiny on each and every dollar that we get to deploy for assistance and aid in general. The key here is that we need to get + +[07:00] Visibility so that we can do that reporting. So, whilst we have data available on the chain, we need to make sure that we have the information available on time, in real time. The scrutiny is actually higher than it would be for in-kind assistance- in-kind would be if we deliver, say, a bag of rice. So the key here is: every dollar delivered is basically monitored in real time and reported upon. Thank you so much for that. All of these things that you just mentioned are so important and have so much to do with the value of blockchain, but also the value of these tools that we've built together. We wanted to share a message from Deputy Minister Bornyakov of Ukraine. The Ministry of Digital Transformation has been supportive of the development of the solution over the past few months, so we'll go ahead and play that now. + +[08:00] Greetings, everybody. Ukraine has been fighting for its future, its democratic choice and its survival as a nation. 2022 is the hardest year for our country. For 11 months, we have been fighting against a brutal, illegal Russian aggression.
For almost a year, Ukrainians have been fighting not only for their country's survival and sovereignty, but also to enhance European security and prove that democracy is still the most vibrant form of governance. Today, another massive shelling of Ukraine happened: dozens of Iranian drones in the very heart of the Ukrainian capital, targeting critical infrastructure when people were supposed to be going to work. Today, 30% of power plants are destroyed. People have lost their access to the essentials: electricity and water supply. Russian energy terrorism is a new way of blackmailing- not only Ukraine, but the whole world. Russian terrorism isn't about winning over strategic objects. This was + +[09:00] Done to scare, to distract and, let's be honest, to kill Ukrainians. Back in 1991, Ukraine chose to be an independent, sovereign country, and in 2022, we're fighting for that choice. From the early days of the full-scale invasion, we realized that we can win only with the help of technologies, new approaches and innovations. Ukraine is using cutting-edge technologies such as blockchain, cryptocurrencies, NFTs and web3 in the face of the dreadful challenges of the Russian war, and today we can see how embracing blockchain technology allows us to scale humanitarian efforts in a way that wasn't possible before. The United Nations Refugee Agency and the Stellar Development Foundation launched a first-of-its-kind blockchain payment solution for digital cash distribution to internally displaced persons in Ukraine, for fleeing Ukrainians and, primarily, for those whose bank accounts are inaccessible. This pilot project + +[10:00] Provides humanitarian assistance using a digital wallet based on blockchain technology and will serve as a possible lifeline for survival. The project itself is a vivid example of how blockchain has the potential to transform the way humanitarian funds are allocated. The use of blockchain technology makes it possible to ensure that the most vulnerable people will have access to funds provided in their name. In this way, humanitarian efforts all over the world will be strengthened in an unprecedented way. The Ministry of Digital Transformation of Ukraine is grateful to the United Nations Refugee Agency and the Stellar Development Foundation for standing with the people of Ukraine and doing this in such an efficient and modern way. Thank you. So, Carmen, after hearing from Mr. Bornyakov, and understanding you already had a really good sense of the challenges in Ukraine: why did you believe this could be a good solution to + +[11:00] Pilot there? Yeah, like we just heard, obviously Ukraine is a global leader in the development of technology solutions. They have a wide array of IT companies available inside the country as well. Beyond that, the people themselves are very digitally literate, and there's also the regulatory environment- very important, of course, to be able to access the financial ecosystem in a digital way. So the combination of the regulatory environment as well as the technology basically allows us to reach the people in need. As I said before, what is most important to us is obviously, you know, to implement solutions that work and that are basically giving that lifeline to people in need. So Ukraine, in this sense, is specific, and + +[12:00] We would have to see how else we can roll out this kind of solution further in the world.
Thank you, yes, that's great. And I'd love to turn to the technical components that bring this solution to life, key pieces being Circle's USDC, the Vibrant non-custodial wallet and MoneyGram International. So let me just provide a really quick overview of the flow of funds and how they get from UNHCR to IDPs in Ukraine. UNHCR is responsible for program eligibility, beneficiary enrollment and the allocation of relief aid, as well as the overall protection of the beneficiaries. UNICC serves as the technical solutions provider for UNHCR. It manages the back-end technology, with the full internal implementation of the Stellar disbursement platform and integration into the UN's cash and reporting systems, making it the first independent instance + +[13:00] Of Stellar Aid Assist, which is so exciting to me. The Stellar network is the blockchain that transmits the funds and holds the record of truth, with the Stellar Development Foundation providing technical and programmatic support. USDC is a one-to-one backed digital dollar issued by Circle, and it provides a trusted digital dollar stablecoin for a safe store of value. Vibrant provides the digital non-custodial wallet software that enables program participants to receive and store their relief aid. And MoneyGram International provides hundreds of thousands of locations around the world where participants can cash out their digital dollars into local currency. Some MoneyGram locations support cashing out physical USD and/or EUR, depending on user preferences. MoneyGram is also integrated with several large banks in Ukraine, so it allows for seamless digital cash-outs to the beneficiaries' bank accounts from their phone, if that's the way they choose to do the cash-out. So now I'd like to invite Dante Disparte from Circle and Anna Greenwald of + +[14:00] MoneyGram International to join this discussion and talk about the specifics of this innovation. Dante, can you speak to the value of having a digital dollar as a secure store of value in humanitarian aid use cases like this? Sure, thank you, Denelle. It's great to be with you and with Carmen and Anna here today. This is one of these really powerful examples that, while it might still be in the prototype phase, I think is the type of innovation that should be in the hands of everyone, everywhere, in no small measure when you have disasters and humanitarian crises like Ukraine- and, I should point out, these types of necessities aren't just for conflict zones or emerging and developing countries, but very much here in the United States. In many places, the movement of money in a disaster scenario is often strictly about supply chain management and logistics. When you have trusted forms of money in digital form, such as USDC, and you have blockchain networks like Stellar, and you have the + +[15:00] Integrations with networks like MoneyGram around the world, you now have a form of money that can be sent to people with the instantaneity of the internet, that has the underpinnings and the fundamental trust of the US dollar, and that has the auditability and the corruption resistance of a nearly perfect transmission medium in the form of blockchain networks. And think about what we're improving upon. In many cases, humanitarian aid disbursements are physical pallets of cash delivered to a physical air base or an airport somewhere in the world, and those pallets of cash are nothing short of honeypots for corruption, bribery and fraud.
The type of money we're moving in this prototype, in this project, is anything but a honeypot for corruption, bribery and fraud. And so I think what we've done here in Ukraine, and what we hope to do in other emergencies around the world- we should not forget a lot of forgotten crises around the world, where this type of prototype can be very meaningful- should be a part of this new digital humanitarian blueprint. So, Circle, USDC- + +[16:00] We're all very proud to be a part of this announcement today. Well, yeah, and I think one of the things that I've always talked about is just how important it is that the value that you send is the same as the value that you receive. And so what you guys have built, and the opportunity to be able to leverage a digital dollar that is one-to-one backed- this is so important. I know you guys know this, but to be able to have that as part of the solution just really makes it work seamlessly. Absolutely. And the point I was going to make is, again, this is not the first time USDC has been integrated in such a large-scale humanitarian project. A number of years ago, at the height of the crisis in Venezuela, we had a very similar project using USDC to provide corruption-resistant, instant and auditable peer-to-peer payments for doctors across the country supporting the pandemic and disaster response at that time. The key, of course, is it's not enough to do this once. There's so much + +[17:00] Great humanitarian need around the world and, as Carmen said at the outset, helping people in acute scenarios like the war in Ukraine is one piece of the puzzle. Ensuring that you're advancing, you know, material financial inclusion objectives along the way, I think, is also very critical. And so we're concerned with not only providing people with no buyer's or spender's remorse by having a trusted digital dollar but, as MoneyGram has provided, now you have this global on- and off-ramp, so that those digital dollars are not potentially stranded on your mobile device or your phone, but they now enjoy a global exchange point all over the world through this partnership with MoneyGram as well. And that's a perfect segue into a question for you, Anna. This solution is as much about blockchain innovation as it is about how traditional services and fintechs are coming together to make a difference. Alex and I always talk about the fact that this is really that blending of the old world and the new, and it's one of the things that we talk about all the time- how that + +[18:00] Enhancement just really creates so much opportunity. Can you speak to how these off-ramps into local currency are meaningful for IDPs? Yeah, thank you. And first I just want to echo what Dante said about how thankful MoneyGram is to be part of this conversation and part of the solution with UNHCR and Stellar. I think Dante used the word acute in talking about meeting the needs of this population, and I think that's one of the places where MoneyGram provides the most value: having these physical networks and integrations with banks and the licensures needed to do cash disbursement.
It's very time-consuming, and so I think one of the challenges faced with refugees and with IDPs is overcoming that initial hurdle of getting into the market. And so I think MoneyGram's ready-made cash disbursement network, that we've spent decades building out, can quickly + +[19:00] Now be turned into this incredible asset in this partnership, and so, again, we're just so thankful to be here and to get to utilize that network alongside you guys. We launched the MoneyGram relationship with the Stellar network in June, and to see it not just be live in 180 or so countries globally, but to be able to be used in this way is, for me, just sort of the reason why I'm in this space. So I'm so grateful for the support that we've had from MoneyGram and from the team there to be able to make this come together. Dante, it's obviously been a really tough time in our industry, with many skeptics out there about the utility of blockchain. This announcement is the direct counter to that, right? It is. In fact, a year ago yesterday I sat in the Senate Banking Committee, having to not only defend the activities of Circle but, in some ways, + +[20:00] The entire industry. And yes, it has been not just what we refer to in this industry as a winter, but perhaps an ice age, and at our darkest time, I think this example today is a good example of a bright light. Also, I would argue: a year ago, Circle announced a broad program known as Circle Impact, in which we made a few big commitments. One of them was that we would allocate a billion dollars or more to minority depository institutions in the U.S. and community banks. We've done that. The second is that we would launch a digital financial literacy program with historically marginalized communities and colleges. We've done that. And the third and perhaps most important pillar is that we wouldn't just sit back idly while humanity suffered or there were dire humanitarian crises. And I'm again pleased to say that when, you know, UNHCR and others had to make an exacting choice about which digital currency would be a part of this program, USDC was there. The key for me, though, is these opportunities shouldn't just be pilot projects. Humanity + +[21:00] And the need for disaster response require that we pre-position these capabilities well before a conflict or a disaster strikes. And so we look forward to partnering with Stellar, MoneyGram, the United Nations systems and, candidly, aid agencies all over the world to ensure that these otherwise open digital public goods are readily available for people everywhere, well before a disaster strikes. And so I think that's our priority as we look into 2023, so that we can end this otherwise dark year on a slightly bright note and not rest on our laurels- this shouldn't remain just a pilot project. This should just become part of the norm all over the world. And Carmen, when we talk about this experience and how we can use it to prepare for future crises: how do we build these solutions so that they pre-exist before a crisis event strikes? Yeah, preparedness is key. So I think that + +[22:00] This is one step. We have a solution in place, and we now need to see how replicable it is elsewhere. And it is, there's no doubt. But we need to keep in mind that these solutions are required to be useful, with the people we serve at the center.
So, in each context, we need to look contextually at what really, you know, are the key criteria to make this happen, and our ultimate goal is to be able to provide the benefits any time, anywhere, on any device. You know, UNHCR has been in that business for some time- that's what we do each time we have an emergency- but we can always get better. So we need to build upon this. These seeds: I think the seeds are now in the ground, so we need to make sure we can leverage them sufficiently. And, you + +[23:00] Know, at the baseline, we need to be able to follow the money and follow the data. So I think once we overcome that, then we are ready for any disaster scenario. Also, a key ingredient is the regulatory environment. We need to make sure that, in each and every jurisdiction, we understand what we can do, because we are not essentially in the business of running a parallel system. We are here, you know, to integrate into the financial ecosystem so that people can go about their lives like any of us would wish every day. So this is how we see it: to fully leverage the current solution that is being put in place. I think that's really important. And, Anna, I wonder if you could think about this solution. We tried really hard, when we + +[24:00] First met with Carmen and others from UNHCR, to make sure that this wasn't a square peg in a round hole, and that we weren't just trying to force blockchain into a situation that didn't make sense. Certainly, we've been working with you guys since June- have you seen that this is different from what you guys have done in humanitarian situations before, and how do you feel about the way that this works with respect to the different offerings, in terms of cash versus bank account, that kind of thing? I am so glad you're asking this question, because it's something we think about a lot at MoneyGram. I think what's unique about this use case is that it demonstrates truly the best of what each of us has to offer. It demonstrates how you can blend and create a bridge, and it moves us beyond the conversation about displacement- and for me, it was never about displacement. It was always about how do we take the best of what each of us has to offer and create true value. And so I think this is just + +[25:00] One of those things where one plus one doesn't make two- one plus one opens the universe to something much bigger. And it again moves us beyond the conversation about displacement and creates a model, hopefully, for other parties like ours to see that there really is this powerful relationship that delivers value, and we can move beyond seeing one another as competitors or displacing technologies. Yeah, Alex likes to say that people have been trying to disrupt your business for the last 50 years. We're actually trying to grow your business and to use all of the tools that you've built to be able to bring good to everyone. Dante, you have spent the better part of your career really focused on these kinds of issues: humanitarian aid, and spending time really on the folks that are in the most need. When you look at the tools that we're offering here today, how do you see this as different from what you've done before? Yeah, I mean, for one, this is no longer an abstraction, + +[26:00] And I cannot say enough about how grateful I am to Carmen and UNHCR for having had the courage to try to build something that is not a science experiment, but rather is in deployment.
So, I spent a number of years on FEMA's- that's the U.S. emergency management agency- National Advisory Council, and a number of years in the insurance industry, and I am myself a disaster-displaced person: when Hurricane Hugo destroyed Puerto Rico in the late '80s, along with it went, you know, any semblance of normality in my life. And so, when speed matters most, friction stands in the way, and moving money in analog forms stands in the way. Remember, here in the United States, we moved more than six trillion dollars of taxpayer-funded money to everybody in the country, and so much of that money was ultimately lost- enough to fund the totality of the UN Sustainable Development Goals, I should point out- in no small measure because the pipes are antiquated, they're leaky, they're backward-looking and they're plagued + +[27:00] With opacity. The form of money that we're talking about here, and the form of integration that we've designed- together with Stellar, of course, and with MoneyGram- is a form of money that is auditable, portable and, ideally, universally usable. If you and I were going to do this with physical cash, it wouldn't work and, ironically, in the context of a pandemic like COVID-19, it would be as limited as our arms can stretch and would have all kinds of other limitations. So this innovation is not meant to be a substitute; it is just meant to provide additional levels of financial support and humanitarian support. And so I'm excited for what this prototype means and how this can be deployed, including, frankly, domestically here in the United States. I think it's really important. By the way, I've been monitoring the comments to see if there are questions. We have just a few more minutes if anybody wants to add some in there. One of the things I was going to say is the simplicity with which this works: the individual, the IDP, receives a text message to download the wallet, and then they know that they + +[28:00] Have the opportunity to receive aid from UNHCR, and it immediately gets put in their wallet using the technology. There's actually a blog post that we have up on our website that'll explain the technical back end of how this all happens. This is really, as you say, getting to sort of the heart of the issue and changing the dynamic, and it just creates an opportunity to put the power in the hands of the IDP. And I think in these situations they don't feel that they have a lot of power, but this allows them to choose to cash out, to hold their value, to cash out a part of their value, that type of thing. So it's really exciting. There is a question here, and this probably goes to both of you: what is the KYC requirement on the recipients, and who is in charge? It's both- UNHCR and Anna, you can start. Yeah, sure, I'll start. So, KYC: we're talking about a global solution, and KYC requirements differ around the globe, but I'll speak specifically to Ukraine. Like we + +[29:00] Mentioned a couple of times, our relationships in Ukraine are primarily with banks, and so both us and the bank require the consumer to have identification that proves that they're the person who should be receiving the funds. And so, along with that, their name, their address and their phone number are what's required to be KYC'd in person. And, Carmen, UNHCR is the entity that actually determines the beneficiaries that are receiving the aid from UNHCR? Exactly.
So UNHCR is in charge of registration, enrollment, eligibility and determining how much everyone will ultimately receive. So UNHCR is not performing the KYC- it's the financial service provider at the cash-out that is performing KYC. Yeah. Well, we are at time. Thank you + +[30:00] All for joining. This has been such a great announcement, and it makes me feel like all that we've worked for together collectively this year is just so worth it, with this kind of thing coming together before the end of the year. So thank you all for supporting the work and for joining us in this journey, and we want to continue- this is not going to be the only thing that we do together. So thank you again, thanks for joining today and thanks for the work that we've done together here. Thank you. + +
diff --git a/meetings/2022-12-22.mdx b/meetings/2022-12-22.mdx new file mode 100644 index 0000000000..fe8a8552dd --- /dev/null +++ b/meetings/2022-12-22.mdx @@ -0,0 +1,144 @@ +--- +title: "Auth Next: Account Abstraction Proposal" +description: "Discussion of Soroban Auth Next, introducing account abstraction and standardized invocation authorization to simplify signatures, enable complex contract calls, and improve wallet interoperability." +authors: + - dmytro-kozhevin + - justin-rice + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [soroban] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session walks through the Soroban Auth Next proposal, focusing on how authentication and authorization can be redesigned to better support real-world smart contract usage. The discussion frames current pain points in Soroban auth—limited invoker semantics, fragmented signature handling, and poor support for multi-step or cross-contract calls—and explains why these issues hinder composability and wallet UX. + +The proposal introduces a more general, protocol-supported approach that shifts authentication concerns away from individual contracts. By combining account abstraction with a standardized authorization payload, Soroban aims to make complex transactions easier to sign, reason about, and safely execute, while remaining extensible to future signature schemes and wallet designs. + +### Key Topics + +- Limitations of the current invoker model and SDK-based auth helpers +- Account abstraction as a first-class concept, separating addresses from signature logic +- Built-in account contracts for classic Stellar accounts, preserving existing behavior +- Custom account contracts enabling programmable wallets and new signature schemes +- Standardized invocation authorization payloads that describe entire call graphs +- Signing thresholds and bounds (not exact values) to support non-deterministic flows like swaps +- Use of preflight simulation to generate structured payloads for wallets +- Improved interoperability between contracts, wallets, and SDKs without bespoke auth logic + +### Resources + +- [Soroban Auth Next proposal document](https://docs.google.com/document/d/1J-J3ClTUkrsLiJag906OH4hmNkZI3Jk6_Y9ZYt_psAI/view) +- [Soroban design discussion examples referenced in the Auth Next proposal](https://github.com/stellar/soroban-examples) + +
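As a rough illustration of the standardized invocation authorization idea summarized in the key topics above, the sketch below models the shape of the proposed signature payload in TypeScript. The type and field names here are invented for exposition- the proposal defines these structures at the protocol level, and the final form may differ- but they capture the core idea: a call graph that an account signs exactly once, bound to a network and a nonce, with arguments a wallet can display (and, for non-deterministic flows like swaps, bounds rather than exact values).

```ts
// Hypothetical TypeScript model of the payload shape discussed in the session;
// the names below are illustrative, not the protocol's actual definitions.

// One node in the authorized call graph: a contract function plus the
// arguments (or bounds) the signer agrees to.
interface AuthorizedInvocation {
  contractId: string;
  functionName: string;
  args: unknown[];
  // Nested calls this invocation may make on the signer's behalf.
  subInvocations: AuthorizedInvocation[];
}

// The payload an account (classic Stellar or custom contract) signs once.
interface SignaturePayload {
  networkPassphrase: string; // binds the signature to a single network
  nonce: string; // replay protection, tracked per address
  invocations: AuthorizedInvocation[]; // roots of the authorized call graph
}

// E.g. "let swap send up to 100 TOKA from me for at least 95 TOKB":
const example: SignaturePayload = {
  networkPassphrase: "Test SDF Network ; September 2015",
  nonce: "1",
  invocations: [
    {
      contractId: "C...LIQUIDITY_POOL", // placeholder contract address
      functionName: "swap",
      args: ["TOKA", "TOKB", "100" /* max send */, "95" /* min receive */],
      subInvocations: [
        {
          contractId: "C...TOKEN_A", // placeholder token contract
          functionName: "xfer_from", // illustrative token-transfer method
          args: ["100"],
          subInvocations: [],
        },
      ],
    },
  ],
};
```

Because the payload is structured rather than an opaque blob, a wallet can walk the invocation tree and show the user exactly which contracts and amounts they are authorizing- the property the discussion below highlights.
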
+ Video Transcript + +[00:00] Hello everyone, and welcome to today's Soroban design discussion. Once again we're here and we're talking about Soroban and sort of the key design decisions that we need to make in order to get it across the finish line. For those of you who are just joining us, today we will be talking- I'm just going to grab a link as soon as I can find it. If you look at the event invite, you will see that there is a link to a document called Soroban Auth Next, and I am also just going to paste it right now in the live chat. So today we're going to be talking about this proposal, which I'm posting in the live chat right now- oh, Tomer already did it. So this is a proposal for a new Soroban auth approach that provides protocol-level support for account abstraction and authorized subcontract calls, and again, there's a link to the design decision document + +[01:00] that Tomer posted in the live chat. And so what we're going to do today is talk through this question- sort of talk through the document and the proposal that it makes- and see where the discussion leads us. The goal of these discussions is definitely to have substantive back and forth, to actually ask and answer important questions and to start to move forward with design decisions, and we do it here so that you can ask questions in the live chat. If we have a chance, we may also be bringing people who raise their hand onto the stage. But to begin with we will talk through the actual design doc: we'll talk about the decision that we're trying to make, and we will talk about, sort of, the background, the problems that we're facing, the requirements and a suggestion for a high-level design. And so, now that it's a little bit after the official start time, I think we can go ahead and start, and I believe that today's + +[02:00] doc was created by Dima, and so I'm going to ask Dima to start walking us through it. Yeah, hi everyone. So maybe I'll start with a little bit of background motivation. This is described in the doc, but I'll quickly go through it. So, just to remind you, the current state of auth in Soroban is that we basically have two kind of officially supported ways to do auth in the contracts. The first one is the invoker, which basically just gives you a handle on the classic Stellar account that invokes the transaction, and that's pretty limited: basically the invoker account can authorize only a top-level contract invocation, and only that. And there is something more generic + +[03:00] which is implemented in the Soroban auth SDK, and this is basically authentication in the SDK that supports a couple of types of signatures, which are classic Stellar accounts, ed25519 and invoker as well, to make things a little bit more generic. And recently we ended up in this somewhat weird state of two approaches: the initial philosophy was that we want things to be implemented as much as possible on the contract side, and the environment doesn't make pretty much any assumptions about the auth. But then we kind of figured out that these things are not quite straightforward, especially if you want to do something simple, which is why this invoker concept I've been talking about has been brought up. So, + +[04:00] and of course, since these are smart contracts, there is always a way to build whatever you want. So what are the problems with the current state? Now, the invoker:
While being convenient to use, it's very limited. As I've mentioned, it's very narrow: an operation can have only a single invoker, right? And it's also shallow, in the sense that only a single contract call may be authorized, and it's also restricted to basically classic accounts. Of course, it also works with contract invocations, but for today's discussion I'd say nothing would change much for the contract invokers. So basically, if a contract wants to authorize something on behalf of the contract itself, no additional signatures are needed, so it is kind of not in the scope of the discussion at all. So we are talking + +[05:00] about only humans or wallets calling into Soroban here, right? For Soroban auth as well, as we mentioned, it's pretty complicated, because the contract needs to manage nonces, it needs to call into authentication methods, provide the right arguments and build the payloads correctly. While being pretty complex, it's still limiting flexibility, because we support only a few kind of cherry-picked signature schemes, right? And if you start doing things with advanced Soroban auth, like forwarding the signatures for subcontract calls, things become pretty tricky in terms of actually signing the payloads that need to be signed, because you need several signatures and they need to have some very particular arguments. And I believe I have seen some examples in + +[06:00] the Soroban examples that are dealing with that, but I feel like this hasn't been experimented with much, probably because it's pretty tricky to do. And yeah, I also don't think we have great signing support anyway as of now, so it's something we will be working on separately. And of course, if you are doing something completely custom, you risk running into incompatibility: some contracts use one authentication or authorization method, other contracts use something else, and they end up not being interoperable, or the SDKs need to account for multiple different auth schemes so they can interoperate with each other. So basically, things become pretty complex if you want to kind of benefit from something else existing on the network. So yeah, this is basically a review of the current approach's problems, and + +[07:00] I've been thinking about whether we can come up with something better in multiple ways, and these are described in the doc too. The first thing we really want to strive for is that the approach should be general: most or all contracts should be able to use this framework and basically interoperate with each other without much friction. But on the other hand, it should be extensible, so that we are not limited to, for example, a few signature schemes or a few wallets and whatnot- there should be a way to extend the system outside of the protocol, right? It should also provide support for various use cases, including the complex use cases, but at the same time it should be as straightforward + +[08:00] to use as possible, given the complexity of the contract. And from the wallet's standpoint it should be usable too, because, as I mentioned, currently coming up with signatures for more complex contract invocations is pretty tricky. So wallets should be able to write the signing code to basically sign the payloads without much friction as well. And something that hasn't even been discussed before, but is something pretty interesting, is that we want to provide both control and transparency to the user. And yeah, this kind of already goes into the account abstraction topic.
But basically the idea is that, if you stop for a moment and think about auth in general- like in the ERC-20 world, for example- you have + +[09:00] this, for example, token contract, or a liquidity pool contract that does something with the funds, but it also does some other things, like verifying signatures, and the signatures use very specific algorithms. And when you start thinking about it, it kind of doesn't make too much sense, right? Your liquidity pool's function is about exchanging two tokens, for example; a token's function is to keep balances, right? And it doesn't necessarily come naturally that they should handle the business logic of authenticating users at all. This is where account abstraction comes from, and it really opens up a lot of various interesting use cases and basically enables a whole new type of contract- smart wallet contracts- that can do really interesting things, like providing + +[10:00] arbitrarily complex multi-signature schemes, or allowing users to control how their funds are spent based on who signs the transactions, or giving temporary permissions to some contracts. The possibilities are basically endless. So yeah, this was it on the requirements topic, and this kind of only starts to come into the actual proposal. Basically, on the high level, it consists of just two parts. One part I've already started talking about: it's account abstraction. So instead of letting every user contract care about how they authenticate their signatures or how they manage nonces and so on, every contract + +[11:00] would use something abstracted away- just some generic account that knows how to authenticate actions on its behalf- and this addresses the generality and extensibility requirements, as well as wallet usability and control. And the second part is basically standardized invocation authorization, that would allow signing things that are, on one hand, more complex than just calling into a single contract, but on the other hand structured enough that the wallets don't need to do anything too special- even non-smart, just regular wallets will be able to do the signatures relatively easily. + +[12:00] Okay, let's go into a little bit more detail on both approaches, and I'll probably stop there for some questions and discussion on the general approach. So, core account abstraction: what's important to understand from the contract's standpoint is that, instead of, like, passing signatures into a method or calling into the invoker as we do now, all the contract interfaces would operate on two types: one being the account and the other the address. So the relationship is that every account has an address. An address is something that you can key your data by, or you can make a payment to an address, and whatnot. So + +[13:00] that's basically the unified address for any sort of operations that concern the address, right? And the account itself is the owner of the address, and it is the entity that can authorize basically spending on behalf of this address, right? So the account needs to sign its operations, and actions that require some mutations to the address that are not positive- for example, making a payment- need to be authorized by the account. So this is basically what the contracts would do, and accounts may be implemented both in a built-in fashion- for example, classic Stellar accounts would get an automatic implementation of this account contract.
So from the classic

+ +[14:00] Stellar account standpoint, nothing will change significantly, and the invoker would still be there for the sake of optimization: if you have a simple contract, you can forward the classic transaction source account to be the signer of this contract invocation too. And again, this is completely transparent from the contract standpoint, unlike now, where you explicitly need to refer to the invoker. But there's also the part I've been talking about before: the account can also be implemented by a custom smart contract, and here things like new signature schemes, any cryptographic methods, basically anything, can come in. It's the responsibility of the account to authenticate and authorize

+ +[15:00] the contract invocations; that's the key part about it. On the second part, standardizing the invocation authorization: instead of having signature payloads that are not necessarily standardized, we would have a structured signature payload that contains one or multiple contract invocations. The key thing here is that this payload needs to be signed just once, and it would contain all the contract calls the user wants to authorize. For example, there's the frequent use case when you need to transfer some funds to the contract address and then the contract does something good for you, like creating a claimable balance ("just withdraw some amount from me and put it on the contract balance for a future withdrawal"), or swapping some

+ +[16:00] token with a liquidity pool. There's a huge category of use cases with this exact pattern, where in the ERC-20 world something like approve or permit would need to happen. With this approach, it is possible for the user to sign a payload that says "call into the function swap, and then allow the function swap to withdraw this amount of this token from my account." And since things are structured, it's possible, for example, for the user's wallet program to say "you are about to sign something that will withdraw this and that token, in this and that amount." So, because things are structured, it is both transparent to the user, and it is possible for the host to actually verify that the user is only calling into methods that have been explicitly

+ +[17:00] signed and authorized. An important note that came up during the document review is that the signature payload doesn't have to contain all the contract calls; it doesn't even need to be a full covering of the call tree of the contract. It's just the parts that have to be authorized on behalf of the user. For example, in the swap case: if the user swaps some token with the contract, this contract may be called as a part of an arbitrary other contract. You are signing that you want to swap some amount of one token for some amount of another token, and then this can be invoked as a part of some more complex call stack, and the user doesn't even need to know about that outer contract. What the user is authorizing is that the swap has to happen, and then

+ +[18:00] some upstream systems can decide how exactly this happens. So I think this is pretty much it on the high level. Before we go into the details, I'd like to stop here and ask if there are any questions, or maybe some things need to be clarified. Yeah, this is awesome, thank you Dima.
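As a rough illustration of the structured payload just described, here is a sketch in plain Rust. The field names and the ad hoc byte serialization are assumptions for illustration (a real design would hash a canonical encoding such as XDR), but it shows why a wallet can both display every authorized call and still sign a single fixed-size hash. The hashing uses the `sha2` crate.

```rust
use sha2::{Digest, Sha256};

/// One authorized contract call, as it would appear in the signed payload.
pub struct Invocation {
    pub contract_id: [u8; 32],
    pub function: String,
    /// The arguments the contract asked the account to authorize,
    /// pre-serialized to bytes for this sketch.
    pub args: Vec<Vec<u8>>,
}

/// The payload a wallet signs once per account. Because it is structured,
/// a wallet can walk `invocations` and show the user exactly which calls
/// ("swap", "transfer of N units of token T", ...) are being authorized.
pub struct SignaturePayload {
    pub network_id: [u8; 32],
    pub nonce: u64,
    pub invocations: Vec<Invocation>,
}

impl SignaturePayload {
    /// The wallet signs this hash rather than the raw payload, so the signed
    /// message stays fixed-size no matter how many calls are authorized.
    pub fn hash(&self) -> [u8; 32] {
        let mut h = Sha256::new();
        h.update(&self.network_id);
        h.update(&self.nonce.to_be_bytes());
        for inv in &self.invocations {
            h.update(&inv.contract_id);
            h.update(inv.function.as_bytes());
            for arg in &inv.args {
                h.update(arg);
            }
        }
        h.finalize().into()
    }
}
```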
For everyone who just joined, I want to mention that this meeting today is not necessarily about making decisions; it's about having Dima present this Auth Next approach, asking questions, and figuring out the details. This is a really interesting proposal, Dima. One question I have around account abstraction: one thing that we're seeing in other ecosystems is that account abstraction is also mentioned in

+ +[19:00] the context of allowing different kinds of accounts to be the origin of the transaction, and I wonder if this proposal also paves the way for a non-regular Stellar account to be the origin of a transaction, to have a native balance in XLM, which is something that isn't mentioned here. Or is this not a consideration at all? So, I didn't think about this much, because this is more about auth, but I think it kind of comes back to the discussion we had about trustlines, for example: whether contracts should have trustlines, and whether contracts should have XLM balances. I think it kind of

+ +[20:00] makes things easier, in the sense that since everything operates on accounts now, it is more natural for the account to also have an XLM balance; I mean, it will have some balance in effect. But I guess this question is more up to the design of the interactions with XLM, and I'm not sure we have made decisions there. It might make things simpler, but maybe not, because the main problem with withdrawing any XLM from a contract is that we wouldn't want to run a VM for that. So I think that's the question that needs to be resolved before we can go into this

+ +[21:00] topic. Yeah, I guess my question is: should the XLM balance be part of the account abstraction, or potentially facilitate this in the future? So, basically, nothing prevents an account contract from having an XLM balance; the tricky part is how exactly we withdraw this XLM from the balance without invoking a VM while figuring out that this account has actually authorized it. That's the tricky part, and I'm not sure about it. In theory there is a very easy way with account abstraction to do that; the only issue is that it would require actually calling into the account contract, which is a problem because we need to do this before applying anything. That's the concern. Yeah, maybe I can add a little bit to this. The kinds of challenges with account abstraction in other blockchains,

+ +[22:00] they come from when you try to use the contract to pay for its own fees: you have sequence numbers, nonces basically, and fees being paid by the contract, and as soon as you start to get into that game, you need to execute the contract while you're flooding the transaction, so before it gets processed by the network. And then you get into those big challenges around, okay, what's the maximum gas or whatever that I want to allow a contract to use outside of applying transactions, and of course this gets quite complicated. Maybe, to go back to what Tomer was asking, whether that would be the type of thing we can add in the future: I think + +[23:00] the current proposal doesn't stop us from doing that,
if we find ways to make it efficient. Maybe some of the things we could do, and this is related to the other conversation we are having on the token contract, like: should the Lumen balance attached to a contract be something that is first class, so that we can efficiently decide whether a contract even has enough balance to pay? This is one of the problems, not the only one, but it's one of those things that maybe we could do early on anyway. But yeah, that's kind of where we are. I'm curious, so there are two

+ +[24:00] major parts, or main parts, to this proposal: the first one is the account abstraction, and the other is the standardized invocation authorization. If we focus on account abstraction for a second, putting aside some of the details that we just mentioned: does anyone object to account abstraction? It sounds like it's pretty much a win-win situation. We're looking at other ecosystems, and you can see that the Ethereum ecosystem is working very hard to retrofit account abstraction, regretting not adding it in the beginning, and we're seeing a lot of the challenges that come from not starting off with account abstraction. So it almost sounds like a no-brainer to me. Is there anyone that opposes account abstraction?

+ +[25:00] Dima, from your perspective, are there any downsides to adding account abstraction? Yeah, so, account abstraction in the built-in fashion is definitely a win-win, in the sense that for things that are supported by the host there's almost no performance cost, and the contract interfaces become really convenient. In terms of the custom abstract accounts, it's definitely a win from the functionality standpoint; the downside is that using them would require more gas, obviously, because you need to run WASM and you need some additional VM invocations. But I feel like this is a fair price, because it allows achieving some functionality that is not achievable

+ +[26:00] otherwise. So basically the main cost of it is the performance cost, and we need to be okay with that. And if some party wants to pay for the contract invocations, they may have the same trouble when it's a custom account contract, but I'm not sure that's a big enough deal. I think, in general, account abstraction could be implemented in some way even if we didn't do anything on the host side, and it would just be even slower and worse. So I don't think this downside is big enough to state that we shouldn't go for account abstraction. Awesome, okay. So I think we can move on to the standardized invocation authorization, unless people want to keep talking

+ +[27:00] about account abstraction. Can I ask just a quick question, Tomer? Go for it. Yeah, so, does this mean that, if this sees wide adoption for regular accounts... or maybe this is the most basic question: does this impact regular accounts? Can I take my Stellar classic account and put its signing authority into a contract? And if so, does that sort of blur the line? I mean, we have this sort of clean separation between classic and Soroban.
And so are we opening up the bleed-over between those two systems wider, forcing more classic Stellar transactions to use Soroban resources in a constrained-resource environment? Is that a downside, I guess? I would say this is actually not

+ +[28:00] the case: I don't think anything changes functionally much for the classic accounts compared to the current world, because currently we already, as I said, provide this invoker method that resolves to the classic source account, and we also have SDK support for authorizing a contract invocation on behalf of classic accounts that have pre-signed it. So you can pre-sign something using your classic Stellar account with thresholds and so on; nothing changes in that sense. From the contract standpoint, what actually changes is that contracts themselves don't need to know about the classic accounts' existence, which I think is a pretty good thing, because, as I said, there is really no good reason

+ +[29:00] for a contract to worry about how exactly something has been authorized. So I don't feel like there's any functional difference, but there is the positive effect of contracts being more generic, without specifically coding for the classic accounts. Yeah, I would add to that: my mental model of this is similar to what we're doing with the Stellar Asset Contract on Soroban. Basically, all the interop with Stellar classic, and all the idiosyncrasies that come along with it, live in a built-in contract. That applies to the token contract for the asset, and it also applies to Stellar accounts: now that we have a proper abstraction, all the finagling with Stellar accounts happens within this built-in contract. To some extent it's actually a lot cleaner, because it

+ +[30:00] doesn't pollute the host functions; they don't need to know too much about Stellar accounts, because all of that is abstracted away and mostly lives within this built-in contract. Would that be accurate, Dima? Yeah, exactly. Nick, does that answer your question? Yeah, that's helpful, thanks. Cool. So if there are no other questions on the account abstraction, we can move on to the standardized invocation authorization. This is definitely, I think, a fairly novel approach, and I'm curious, Dima, what the potential downsides you see here are. One thing

+ +[31:00] that immediately gives me pause is: are we putting too much on the preflight process? When we were talking about preflight just in the context of getting the ledger entries that you're going to touch, that's something that's relatively easy to predict. For example, if you're going to do a token transfer, maybe you don't actually have to call preflight; you can skip it because you know what's going to happen there. In terms of the signature payload, suddenly the payload appears to become non-trivial. So are we building too much dependency on preflight here? Well, let me talk about this as a separate point, because I have thought about it before. So, what Tomer refers to is basically the way I propose to enable this
So what tomorrow refers to is basically the way I propose to enable this + +[32:00] approach for the user can sign like complex payload with multiple contract invocation the main enabler for, that is PreFlight, that would basically run the Contracting vacation in like authorization trades and mode and return the signature payloads, that need to be signed by the votes and I would argue, that this is basically the satellite kind of an enabler of the approach. Because without, that the only thing, that we have left. If we want to do something more complex than one invocation one signature project invocation would require custom payload building, that is also contract dependently. So basically the proposal itself is about standardizing the payload format + +[33:00] which in the first place say wait probably shouldn't be like really controversial. Because like the benefit of having custom rewards is kind of questionable right. But you know. If you wanted your pillow to be complex like what is a good way of building it and pretty quiet it's one of the answers to, that. But for the simple cases you know as you have mentioned like well what. If we transfer token the same can be said about the signature preload like. If you are in the same world where you just you know want to have a single account, that signs for a single contract code, that doesn't go into any sub subcontracts on behalf of, that account. Then the signature pilot is very simple and it's not trivial and again the benefit of having its standardized and structured is, that you know you can write a libraries, that + +[34:00] would build simple payloads for you and, that would work again for every contract it is you can say framework creates a kind of search and stuff, which we hope to be almost any contract. So so I'd say like there is definitely some downside like in, that in case of a really complex invocations on behalf of the user the pre-pride needs to be used twice and it may be a little bit annoying. But you know the issue is, that alternative to, that would be World War everything needs to be built manually like prefighters basically is the best thing we can probably provide out with, that or you know just go with the approach of erc28 approach right therapy just always + +[35:00] saying the single call. And then did do a lot of State manipulation to actually make things work. But I'm not sure, that's necessarily a good thing. Because it comes with its own set of problems, that again arguably should not be stopped on this level like we put some things into token interface. But what. If I want to write. Then a POI can include my grade all this set of different methods, that actually work around the limitations of like the signatures. And so on. And so forth. So basically the prepaid here is something, that actually helps us a lot I think and yes like made this Rich downside and maybe increasing a lot and it or it is rare cases where it cannot be used. But you know you can always build manually especially. When you like control will + +[36:00] control exactly also contracts in your Coast Tech and you know exactly what needs to be signed. So I don't feel like it's big enough downside and also I wanted to mention, that like the approach here is a bit rough around the edges. So it's around some other things, that may be different in terms of implementation for example like maybe they could some logic a bit more. But basically yeah I don't want to go into details maybe just yet. 
But the point is like maybe we can make things a little bit immature and they are described. Now but yeah I'd say creep white is like I wouldn't look at prepare the downside it's actually quite an upside. Because also Alternatives we've looked at well discussing like + +[37:00] what we how can we simplify building the payroll say would be kind of for example an alternative we have discussed with John a. While ago also aquatic contract could provide the methods, that says how exactly to build a payload for every call of the contract matter. But you know this is a lot of work to be done by the contract developers and again it needs to happen for every contract well here we kind of come up with a generic mechanism and you know the contract writers don't need to worry about the better homes they can just write pretty complex contracts without worrying about users not being able to ever build signature for one and you know it doesn't need to be part of the interface or anything it's like very naturally healed so, that's my sense on it I you know someone has more + +[38:00] concerns about, that yeah maybe the thing, that yeah. When you mentioned like the whole like having to do this work for like contract developers having to do this kind of work for every single new interface type is kind of a is super important like I think yeah what we're talking about here is. If you look at like erc20 like you have for example the what's the Ikea 2612, that's the permit extension, that's for your C20. So like. If you think of like NFTs right they don't use this it's a new method. And then you have to basically support this both at the contract level like as a caller to, that contract you have to support this new this kind of way of wrapping and signing things right and you also have to of course do, that in the wallet in the SDKs. And so on. So the complexity is increases very + +[39:00] quickly. When you don't have any standouts. So I think the being able to use PreFlight, that we have there available to us in this context is actually going to reduce friction around the adoption of like those kind of a encryption schemes at the right place in the different contracts in a way, that I think is maybe like a unprecedented in the ecosystem oh yeah, that's a good point and obviously. If if the ecosystem wants to adopt like in the ap2612 style you know permit based authorization they can do, that. But you know we hope, that you know we want to make something, that's batteries included one other question, that I have just like looking at the doc right. Now is you know it looks like we're signing all the function calls including the arguments it are there situations in + +[40:00] which the arguments change slightly between invocations in a way, that invalidates the our signatures FEMA yeah. So yeah the testing discussed on the doctor. And so basically the idea is, that you probably shouldn't write your contracts in a way where the signature arguments change for the subcontract codes again it doesn't really matter what kind of your approach you're going with like even. If you go we are seeing 28 permit for example like you cannot say, that hey what are the arguments of current change a little bit like whatever has been signed has been permitted. So it is kind of similar and in cases. 
When you know some arbitrary amount of token need to be spent for example the proposed approach is use approved + +[41:00] style method like we wouldn't remove it from the token interface it's too useful for cases like, that the difference would be like. If you do things like temporary storage for example this approve may be alive only during the top level Contracting location. So you know there are no problems with hey I've approved this token like a thousand units of the token. But the contract spend just 500. And then there is outstanding 500 approval for me and this contract, that someone may misuse or whatever. So instead what would happen you would atomically call a proof inside the contract invocation and it will only be active during this contract invocation the top follow one. And then basically after a proof has been executed the contract can spend up to approved amount of so. Because of the user and obviously not the whole amount + +[42:00] needs to be spend. So this amount will just stay in the user account and zero won't be even an outstanding approval, that can be done with you some help. So basically the idea would be, that the user still needs to sign some upper bound on what is going to be spent and, that's basically the hope, that this is you know for the most contracts and again it's not like we can do much more besides maybe I don't know user signing, that they approve spending as much as contract wants. But again we don't remove this possibility either. So basically yeah I think like the contract should be reading just in a certain way to kind of allow for specifying an upper bound not an exact amount, that needs to be transferred from them this problem shouldn't appear much and it's kind of unrelated to this proposal it's like in general. If you + +[43:00] want to do anything with outright subcontract both we will run into this issue and I think, that I was thinking also about, that like the there might be ways actually to you know once we have kind of the this kind of authorization model kind of figured out like we may actually do some tweaks to the standout token contract like I'm thinking like we could actually have like the for example the transfer method right. If it had like a max amount actually explicitly there. And then the authorization, that is done inside would be on the max amount on the on not on the actual amount this food, that I actually I think allow us to get rid of even the pre-approved and later pay you know with like some other amount, that's smaller + +[44:00] I think, that we could do things like, that yeah maybe basically like to be clear like contract can require user outrights arbitrary arguments based on its input arguments. So it's not like you know we have said in each and every argument. So it's definitely possible to have methods where you assign one argument and the research, that just well passed around this. So you know again this similar to account abstraction like this opens up a lot of new approaches and tours and eventually like the token interface can be modified to account for this basically again I think the difference here from many other things is, that you know we want to make it easy to call things on behalf of the user and the source of interface you can come up with having this feature I think like more resources needed to actually come up with writing Pages for, that right. Because we can make things much simpler and safer than they are. When + +[45:00] only a single invocation can be fine pretty much properly. 
So I think, that's more of a question to with to the interfaces and not necessarily. Because yeah I definitely think I'm not sure I'm looking at the document right now. If we can have like a detailed the like description for how this would work with like non-deterministic arguments like I'm looking at your like your main example of like swapped in transfer and I feel like you know. If the swap is a strict send rather than a strict receive. Then you know you're not going to know what the amount, that is actually being transferred in advance or even during pre-flight. So I think maybe just like unpacking, that example would help. So I have an example of the atomic slope right and what it does is basically what you sign is, that you want to swap an amount of + +[46:00] token a for at least an amount of token B right this is what you're saying Yeah. So basically you send, that you want to spend it most amount of token a and at least the amount of token B, which I'm not sure, which and receive this message. But basically. Then the users who have signs, that can be they can have their tokens robbed atomically and the example I settle for the you know price for example. So not all the token is being withdrawn and as anyone ever use this Contracting week multi-stopper. When you have multiple parties like on both sides, that can solve it to each other. So basically my point is, that again the main thing, that you sign the thresholds not the actual amounts. And then definitely possible to just spend or receive business threshold without + +[47:00] any additional input from the user. Because again these are just saying the thresholders, that kind of works got it demo one question from moots or statements in the live channel is, that moose would love to see address identifier and like byzen 32s all of these kind of like converge and I know, that people have been pretty frustrated with the existence of all these different ways to describe accounts does your account abstraction solve this yes definitely right like this is about what I do in my prototype already and, that's definitely something I've been looking into. So basically they said like. When you want something authorized you just needs account and you want an address you just use address, that's pretty much it + +[48:00] so. If you want to do something for example on behalf of the current running contract you just call get current contract account, which is account type 2 right. So from the contract Traders standpoint where you don't have any additional it you may still need the contract ID for some use cases. But it will still leave this. But you know this is Project context. When you are interested specifically in the contract ID right. Then it will goodbye. But in all the operations, that involved with any manipulations I would say right you would just use a Content address types there is no identifier type anymore the bytes are just used for things, that are actually bites like a passual contrast City. But there's a not since involved into our spring work for the most part I said, that smart forward themselves + +[49:00] okay I hope, that makes moons happy it does make me happy great are there any other questions either internally or from the audience Dima. 
Dima, if people want to learn more about Auth Next, what should they do besides reading the document? Well, the document has links to the examples. I think the examples are definitely useful to get a grasp of how things may look. Again, this is a rough draft and it may change, but you can see the general ideas: you can see that accounts and addresses are used everywhere, how the calls are authorized, and so on. So look at the examples too,

+ +[50:00] especially something like the timelock example, where you can see exact diffs from the current implementation. And the swap examples are something completely new that is really painful to do right now, which is why we don't even have an existing example for it. So yeah, I think that's that. And just ask me, either in the document or on Discord: if you have things that you think are not covered anywhere, I can answer there. Awesome. So after the holidays we're going to meet up again and talk more about auth, and I think that's it for today. Justin, do you have anything else, or is it a wrap? No, I think it's a wrap. Happy holidays, everybody. Definitely check out the doc, definitely watch out for more Soroban design discussions coming up after the

+ +[51:00] holidays, and everybody have a great holiday season.
diff --git a/meetings/2023-01-12.mdx b/meetings/2023-01-12.mdx new file mode 100644 index 0000000000..b582a5bb8f --- /dev/null +++ b/meetings/2023-01-12.mdx @@ -0,0 +1,158 @@ +--- +title: "Auth Next: Proposal Review" +description: "A detailed review of the Soroban Auth Next proposal, examining account abstraction, standardized authorization payloads, and their impact on wallets, fees, and contract composability." +authors: + - david-mazieres + - dmytro-kozhevin + - eric-saunders + - graydon-hoare + - leigh-mcculloch + - nicolas-barry + - paul-bellamy + - siddharth-suresh + - tomer-weller + - tsachi-herman +tags: [developer, soroban] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This follow-up session revisits Soroban Auth Next with a deeper technical review and open Q&A. The group focused on validating the proposal’s core ideas against real developer and wallet concerns, especially around complexity, performance, replay protection, and preserving simple signing flows for common use cases. + +Discussion emphasized how moving authentication into the protocol can reduce fragmentation while still supporting advanced scenarios like multi-step contract execution, delegated authorization, and programmable accounts. Much of the review centered on how standardized authorization trees, preflight recording, and host-managed nonces interact in practice. + +### Key Topics + +- Review of current auth fragmentation (`invoker`, `soroban-auth`, custom schemes) and why it scales poorly. +- Account abstraction model where contracts interact with generic accounts, not cryptographic primitives. +- Standardized authorization payloads that describe authorized call subtrees (not full call stacks). +- Preflight “recording” to deterministically build payloads wallets can sign once per account. +- How authorization forests allow selective signing of nested contract calls. +- Gas, performance, and depth concerns for large call graphs. +- Security considerations around partial authorization, front-running, and contract design pitfalls. +- Preserving simple invoker-style UX for common cases via built-in account implementations. +- Replay protection, nonce consumption, and the need for expiration or ledger-bound signatures. + +### Resources + +- [Soroban Auth Next proposal document](https://docs.google.com/document/d/1J-J3ClTUkrsLiJag906OH4hmNkZI3Jk6_Y9ZYt_psAI/view) +- [Soroban protocol and CAP discussions](https://github.com/stellar/stellar-protocol) + +
+ Video Transcript + +[00:00] Okay, let's get started. Welcome, everyone: this is a Soroban Design Discussion. In these meetings we discuss some of the core decisions that we need to make when we're building Soroban, which is a platform for smart contracts that is in active development right now and is being integrated with Stellar. Today we're talking about Auth Next, which is a proposal that Dima put together to address some of our authentication and authorization needs on the network. Without further ado: this is the second time we're talking about Auth Next (the first time was before the holidays, when a lot of folks were out), so we're going to start with an overview by Dima, then dive into questions, and ideally we will be able to make a decision, or get close to

+ +[01:00] one, today. Dima, can you take it? Yeah, hi everyone, do you hear me okay? Yeah. Okay, sounds good, thank you. So, a quick overview. I will probably skip the motivation for the sake of time; I will just say that the general motivation is that we aren't too excited about the current state of auth. It's a bit too fragmented, and too limited in some senses, while it's too flexible in other cases where that's not necessary. So this proposal tries to build something into the protocol that would be both flexible and also standardized, so that contracts can interoperate and the client software can implement certain things just once,

+ +[02:00] without tweaking them for every contract. On a high level, the proposal consists of two big components: the first being account abstraction, and the second being standardized signature payloads and invocation authorization. To expand on the first point: account abstraction is a well-known concept on other chains, and the idea is that instead of passing around entities that are tied to specific crypto algorithms, such as public keys or something like that, or tying all the auth to, say, classic Stellar accounts, we make the contracts operate on abstract entities called accounts. From the

+ +[03:00] contract standpoint, an account is just an object that has an address, which can be used to key balances, for example, or any account-related information, or to transfer balances to this account, and it has a way to authorize the invocations of functions. It's not the contract's business logic how exactly the account does that; hence the abstraction. The accounts themselves can be implemented in a built-in fashion, like some of our built-in tokens, and the built-in implementation will cover the classic Stellar accounts, so they will all get their default account contract. But they also can be customized: for example, if someone comes up with a new crypto algorithm, something quantum-resistant,

+ +[04:00] for example, or for the hardware wallets that currently aren't supported at all, it is possible to write an account contract that performs this custom authentication. And the possibilities are much broader than that: it is possible to have arbitrarily complex multisig schemes that are probably not possible with classic Stellar accounts today, for example requiring different authorization depending on which tokens are spent. Basically, this is a smart contract; I guess that's the bottom line of it. So that is the first component, and the second component is about the signature payloads.
Because currently the signature payload is sort of freeform: we have the SDK implementation of some structured

+ +[05:00] payloads, but this is just in one SDK; it's not standardized in any way. So the proposal here is that instead of relying on contracts to define their own signature schemas, we provide a generic one that should hopefully be flexible enough to cover all the use cases. The payload obviously would include some context information, such as the network ID, but the main thing, the main innovation here, is that instead of just signing a single contract call, it is possible to sign a whole tree of calls, or even a forest of trees.

+ +[06:00] Basically, from the contract standpoint, when a contract requires an account to authorize an invocation, the Soroban host will add this to the stack of authorized calls and build a context tree. So when authorizing a call from some contract, the payload for this call should contain, obviously, the currently executed contract function, but also the call stack of the authorized invocations leading to it, and the contract-provided arguments. These are basically the only things a contract needs to provide for the authorization; everything else is inferred automatically based on the current

+ +[07:00] call stack and so on. The proposal also includes generalized nonce management. I think we wanted to discuss this separately, but for the sake of the overview, the idea is that the host will manage nonces per contract and per account address, so that neither regular contracts nor account contracts need to worry about that. This comes with some benefits: nonces are consumed only for the top-level invocation and not for any subsequent sub-contract calls, plus less ledger access, and more flexibility for the users. Those are the benefits of this approach. So now,

+ +[08:00] these are the two key concepts. I think another interesting high-level point is how signing is organized for the cases of complex contracts: preflight can be utilized to build the signature payloads. This matters when we are talking about contracts that need more than a single signature, in the current world's terms, or more than a single authorized invocation, in the Auth Next world's terms. It may be tricky to figure out what exactly needs to be signed: the contract may delegate to other contracts dynamically, depending on its input, and things like that. So in order to figure out what exactly needs to be signed to be able to execute a certain contract call,

+ +[09:00] we introduce a mechanism in preflight that will trace those authorization calls and build the proper payload. The cool thing here is that at this stage there are no signatures participating, and basically no commitment and no security risk for the user; the only thing that the preflight needs is the addresses of the accounts that participate in the invocation. What else... yeah, I think that's pretty much it on the high level. There's a bunch of other things, but I think we'd want to look at them separately. Yeah, I think that's a great starting point, and we can open it up for questions. I'm especially interested in the perspective of the dApp developer.
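As a concrete sketch of the host-managed nonces just mentioned: the host keeps one counter per (contract, account address) pair and consumes it only for the top-level authorized invocation. This is illustrative plain Rust, not the actual host implementation.

```rust
use std::collections::HashMap;

type ContractId = [u8; 32];
type Address = [u8; 32];

/// Host-side replay protection, one nonce per (contract, address) pair,
/// so neither regular contracts nor account contracts manage nonces.
#[derive(Default)]
struct HostNonces {
    nonces: HashMap<(ContractId, Address), u64>,
}

impl HostNonces {
    /// Called once when the top-level authorized invocation is checked.
    /// Sub-invocations covered by the same signed tree consume nothing,
    /// which is the "only for the top-level invocation" benefit above.
    fn consume(
        &mut self,
        contract: ContractId,
        address: Address,
        signed_nonce: u64,
    ) -> Result<(), &'static str> {
        let current = self.nonces.entry((contract, address)).or_insert(0);
        if *current != signed_nonce {
            return Err("nonce mismatch: payload is stale or replayed");
        }
        *current += 1;
        Ok(())
    }
}
```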
+ +[10:00] So Paul, if you have any comments there? I think that with the original auth that we built, or what we now call advanced auth, the point at which we realized it was a bit problematic was when we had dApps and wallets try to implement it, and it became clear that it's a lot to handle. So I'm really interested to hear about this one from that perspective. Can you talk a little bit more about what actually goes into the signature payload? Specifically, do wallets have to sign only the subtree sections where the account's authorize is called, or do they have to sign every cross-contract call? Yeah, as I've mentioned, these are trees over the authorized calls, not the full trees of all

+ +[11:00] the contract calls; basically, this is the subset of the whole call tree of the contract that the current account is asked to authorize. So let's say we have a contract A that wants the user to authorize it; then it delegates to a contract B that it doesn't need the user's auth for; then B calls C, which does authorize the user. In that call stack, the user signs A and then C; B will be omitted. So there is no noise, in a sense. Well, that seems much more flexible, yeah. Okay, so for an exchange swap, for example, you would really only have to sign the token withdrawal from your wallet? Yes. Basically, you may need to approve your token to some router contract, and then the router contract can

+ +[12:00] route your token in any direction. Or, vice versa, you may sign some low-level operation that just swaps you with someone, and you don't really care how exactly this is being routed; your signature is required only on this bottom-level operation. So both ways work. Also, an important note on signatures that I haven't mentioned: wallets would sign the hash of the whole payload, and this is mostly for the sake of compatibility with hardware wallets and possible future features. Basically, we want to bound the signature payload size, and what actually gets signed is the hash, so the payload size itself is not an issue. So the wallets take the hash of the entire subtree and then sign that, so there's only one signature? Sure. Okay, cool, thank you. Dima, I have a question, and I'm going to steal one of Leigh's questions:

+ +[13:00] what about the case where you want to pass an invalid signature to one of the sub-contracts, with the purpose of taking a different path in the contract execution; you explicitly want one of them to fail? Okay, so, I mean, I'm interested to see an actual use case for that, because this only influences preflight. From the preflight standpoint, I described this feature where we can record the authorizations, but the side effect of recording them is that we treat them all as successful. It is not impossible to mock failures in the preflight API (it's basically not a protocol change), but I'm not convinced this is something we actually should do, and it's really unclear to me why you would ever want this to be the
But I think this case is only meaningful during the on-chain execution I think it is fine for the pre-flight for example to execute the hate this scenario. Because it will be probably it will consume more gas and build access knowledge your entries than do it in case of failure. So it will make it superset of what is executed in chain + +[15:00] of course it is possible to write some really weird contract Logics, that would try to reorder trace the account. But again I'm not quite convinced, that ever a good idea and there won't be a good support for, that from the pre-flight standpoint for example and. But but I think there is a pretty good reason to not support it. Because you kind of have this recursive dependency where like you want your signatures to depend on the function input. But then. If your signature defines like what the signature should be you get this basically cyclic dependency rate. So I'm not convinced this is a good idea basically like what I would want contracts to do normally is, that. If you call contracts function with certain arguments it comes with a certain signature preload and this + +[16:00] relationship should be deterministic right and it should not involve signatures in the input, which is actually one of the problems with the current Advanced awesome. Because we are like are we used to like include some information from the signatures. While building pillows for another signatures and it's pretty messy and how to painting and power. So yeah and another question this change implies, that you need to do the cryptographic verification posts transaction execution right are they very end right. Because you need to build the pre-image and in order to build a pre-image you need to run the contact right. So not on the simulate side but. When you actually go and execute it you need to run the entire smart contact + +[17:00] three right and only at the end you have the pre-image, that you can verify with signature, that's actually not true right like it's a bit of an implementation detail and I think it may change. But in the current prototype what happens is, that since we kind of decouple authentication and naturalization well at first the whole tree has to be passed in some way along the transaction right. So the transaction should know in the first place whatever. If you are claiming you have time. So from the smartphone perspective for example it's possible to examine the whole tree, that's trying to or trades from the authentication perspectives there is just this curved payload, which is the hash of something and from the + +[18:00] authentication standpoints the only thing we need to do to it is to basically verify the signature of this payload is correct and in case of mounted sync accounts find all the signers right. So basically the current implementation the authentication happens. So easily the first time the account writes to trade something and actually the same goes to evaluating the whole tree over the course in the vote like the latter part may I may not change. But I think yeah there is really no need to do anything until the end of the contract execution we just do verification as soon as possible. So basically you've like. If Thompson is not like. If some were deep in the subject there is a cause, that hasn't been authorized will find out about it as soon as we reach this point. But not + +[19:00] I see. 
So the entire tree is effectively submitted as part of the transaction? Yeah, it is. And I have seen your suggestion on the doc that maybe, instead of passing the whole tree explicitly, we may pass hashes of its nodes, which may be fine in terms of the transaction size, but that really only matters for the transaction part of things, not for the actual execution. Yeah. So my concern here is that people are going to use the existing contract infrastructure to develop deeply nested contract call trees, and when that happens we can have several hundreds of contracts executed per transaction, and I'm just kind of horrified at the idea that, as a result of that, we're

+ +[20:00] going to have hundreds of signatures in each transaction. So again, there is only a single signature per account. The signature flow is: you take the payload, which is basically the network ID plus a vector of the invocations you want to authorize; you hash it; you sign the hash. That's it, so there is only a single signature. Then, at execution time, the first time the account tries to authorize something, we verify the signature, and afterwards we just do matching: whether each invocation is present in the tree or not. Now, on the deep-tree topic: I don't think this proposal has much relevance to it, because it's not like you can call hundreds of contracts now

+ +[21:00] anyway, with or without accounts. You cannot really execute hundreds of contract calls even if they don't do any authorization, because you would run out of gas before that; we are going to have a gas limit on the transaction. And I think it will always be a better idea to have a single contract call rather than multiple contract calls, because calling into a contract comes at a cost. Yeah, that makes sense, thank you. Can I ask a question to do with what gets signed in the subtrees? Going back, Dima, to the example you provided when you were answering one of Paul's questions: you described a situation where an

+ +[22:00] account is signing... you're calling a contract, and that contract is calling three other contracts, A, B and C, and this account just wants to sign the call to contract A and the call to contract C. In that situation, does that mean that the account signs two subtrees separately, so they provide two signatures, one for A and one for C? Or, because a few times it's been mentioned that an account only ever provides one signature, is there some way that they sign those two subtrees with only one signature? How does that work? Yeah. So basically, as I said, technically you're not signing a tree; you're signing a forest of trees. For example, if you have a top-level router contract that doesn't require any authorization, but then you make two sub-contract calls that are authorized, that would spawn two auth trees + +[23:00] right there, because they will have different top-level authorized nodes, which I think is fine: you still need just one signature over that. And when I say one signature per account, "account" here is an account object; it doesn't mean that it's unique. If your transaction accepts two accounts and they happen to have the same address, we won't try to deduplicate them, and then there will be two signatures.
So when I'm talking about a single signature per account, that's only in the context of that account; in the context of the whole transaction, there may be an arbitrary number of accounts, some of them may be duplicates, and they will have their own signatures too. We don't try to be any more elaborate here. So it's one per account, not more. Okay. So in the situation where the

+ +[24:00] account wants to sign contract A and contract C, but they don't want to sign the very top-level contract call, they would provide two signatures: they would sign the subtree for contract A and the subtree for contract C, and then they would include those two signatures. Is that right? Okay, so first I want to make the point that it's important to rephrase this and say that it's not that the account wants to do something; it's what the contract wants to do, because the source of the signature payloads in the first place is the contract, not the account. The account's role is to verify the signature (an account contract, in the broader context), and the wallet's role is to sign things. So, depending on the call tree, you

+ +[25:00] will get different results. For example, if you have a contract, as I said, that calls independently into, say, contract B and contract C, but you authorize it once at the top level, you'll get a single payload. So let's say you have a contract A that doesn't require authorization itself, but it does call into contracts B and C, which do require authorization: if you authorize at contract A, then you will get a single payload with both invocations. But on the other hand, you could just pre-sign contract B and contract C, say, if you don't want the user to have to know about your top-level contract; then your top-level

+ +[26:00] contract would deal with two separate auth trees, and it would still be possible to forward the signatures. So basically, it's whatever the contracts define, and whatever you preflight or build signatures for; it's driven by the contract interfaces. I mean, it's probably a good idea to try to implement things in a way where only a single signature is needed, so that there is some atomic operation per account that may involve an arbitrary number of sub-contract calls. But nothing stops you from using multiple pre-signed calls; it just needs to be reflected in the contract interface and implementation. Right. So what you're saying is that for contract A, you could just sign the entire tree if you wanted to?

+ +[27:00] Yeah. So, just ignoring preflight for a second, and assuming that the developers here know how to build these trees themselves, and they know how to do all the signing themselves (I'm not really too interested in the automation just yet): a developer could choose to sign the entire tree, and they would be signing A, B and C, or they could choose to sign just the subtree A and just the subtree C and include those two signatures? Basically, yeah: as long as your forest of trees corresponds to what has been authorized, you should be fine. If you're in full control of all the execution paths, you would just look at where your authorizations occur, and you would include all of this into the signature payload you're signing.
And then you can basically embed these sub + +[28:00] trees in arbitrary call trees you care about, as long as the topology is the same. So a signature on a tree will just mean that whatever the address is for that signature is authorized anywhere in that tree where that address shows up; is that correct? Authorized anywhere the contract calls authorize on it, right: anywhere the contract requested authorization. I have a question related to this, about the authorization context and the call stack. If you invoke a contract A that calls B, and B is the only one that calls authorize, then A is not part of the context. How about someone front-running A by submitting B? I'm wondering if there are any security issues or weird use cases here, and whether we

+ +[29:00] should consider requiring the full call stack by default, with the option to authorize a subset of the call stack. The concern here, at a high level, is whether it's possible for a front-runner to prevent expected logic from running. Well, at first that was a consideration, to include the full call stack, but in scenarios like we have just discussed its value is rather limited, and it makes use cases like exchanges much trickier to implement properly, because you may care only about swapping the token and you don't want to care about how exactly it has been routed. As for front-running: some contracts would indeed need some front-running prevention in place. For example, coming back to the batching example: let's say your contract clears multiple swaps, and a swap may fail for multiple

+ +[30:00] reasons; one of them is wrong signatures, another one is being front-run: someone front-runs the swap and it's no longer valid. This case is really no different from any other failure: just write the contract in a way that gracefully handles the failures and you should be good. As for security issues: it is possible to write a contract in such a way that you can front-run a signature for it and it won't do the right thing, but that's a contract implementation issue. Normally, what you require the user to sign should be some sort of atomic operation that the user intends to perform; it shouldn't be some middle step. For example, say I want to create a claimable balance, a simple thing: I

+ +[31:00] want to transfer the token to the contract, and I want to create an entry on behalf of this user. I could write this incorrectly and take just the signature for a transfer, and a signature for a bare transfer can be front-run; what I should do instead is require authorization of the whole operation. Bugs will exist for any sort of authorization scheme, but I don't really see an inherent security risk here, as long as you don't require the user to sign non-atomic things. And coming back to the swap example: let's say you did something based on the swap outcome; then you would probably want to authorize your top-level contract that does the swaps, because it does something else that will probably need to be authorized, and

+ +[32:00] you don't want it to be front-runnable. And also there's the general front-running prevention topic... never mind. Okay, go ahead.
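A tiny sketch contrasting the two designs in the claimable balance example above. The `authorize` stand-in and the function names are hypothetical; the point is only the scope of what the user is asked to sign.

```rust
type Address = [u8; 32];

/// Stand-in for the host's auth check: did `who` authorize calling `func`
/// with `args` in the current context?
fn authorize(_who: Address, _func: &str, _args: &[i128]) -> bool {
    unimplemented!("host-provided in the real system")
}

/// Risky design: only the inner transfer is authorized, so the signed
/// payload means nothing more than "a transfer", and a front-runner could
/// consume it outside the claimable-balance flow.
fn create_claimable_balance_risky(user: Address, amount: i128) {
    assert!(authorize(user, "transfer", &[amount]));
    // ... move funds, then create the claimable balance entry ...
}

/// Safer design: the user authorizes the whole atomic operation, so the
/// signature cannot be repurposed as a bare transfer.
fn create_claimable_balance(user: Address, amount: i128) {
    assert!(authorize(user, "create_claimable_balance", &[amount]));
    // ... move funds and create the claimable balance entry atomically ...
}
```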
Yeah. So, like you said, that makes sense. The one thing I'm concerned about is that the authorization context is determined by the contracts, as you mentioned, and the user is just going to sign whatever preflight returns. And these bugs: if you don't have a great understanding of how these authorization mechanics work, you may not see the problem when you look at the contract, and the user is still just going to sign whatever preflight returns. So it may not be easy to see what the issue is until it's too late. That's just my concern. Yeah, my intuition is that, again, this is probably the case for any sort of authorization. Let's say you sign the whole tree: do you really think that adding

+ +[33:00] a bunch of extra stuff to the tree would prevent the weird cases where you sign something that you didn't intend? I'm not sure. But I think a great thing about structured payloads is that it should be easy to implement generic wallet support for parsing this tree and finding the relevant calls. If you send something to an exchange that is supposed to sell token A for you, and you get your payload, and your wallet says "hey, this is going to withdraw 10x of the token A that you actually wanted to trade," then you probably won't sign it. So there is definitely more control from the user standpoint. In a fully generic scenario, I agree that some things might be vulnerable and

+ +[34:00] users wouldn't know about that, but I don't have a good understanding of how prevalent or how dangerous this would be, because a contract could be flawed in multiple ways and you wouldn't know. So it's a question of trust, and of only using the things that matter. The fact that we can at least easily see what the contract is trying to do to your tokens, or whatever other commodities you have, is already a big win here, because it limits the impact surface. Yeah, thanks Dima. Any further questions?

+ +[35:00] Leigh? Yeah, I have a question, but it's a little bit derailing, so if anyone else has questions that are more along the lines of what we're already talking about, that might be best to do first... okay, I'll go ahead. So, I'm interested in how the proposal interacts with the simple invoker auth case that we currently support today. As Tomer was describing before: when we developed the current version of Soroban auth, we realized that it's actually relatively complicated to use, and so then we went and added invoker auth, which is basically the same as msg.sender in the Ethereum world. The way it's implemented is that the source account on the operation is the invoker, and so a contract can just ask who the invoker is, and they'll get back an

+ +[36:00] address, which is either the source account on the operation or the contract that's calling them. So I understand that the proposal basically replaces both the complicated auth and the simple auth we have today, and creates a single unified interface that contracts use to verify that the address is authorized. I'm just wondering
I'm just wondering if there's a way that we can retain the simplicity, so that for the vast majority of contracts and wallet-contract interactions, what they need to sign stays in the simplest form — without needing to go and sign these call trees. + +[37:00] Well, the auth tree for what is currently the invoker case is pretty much equivalent to just the top-level invocation, because it consists of only a single invocation — you just add a vector around the thing that already exists. So even if that call — say I'm calling just one contract, but that contract may be calling five other contracts as part of what it's going to do; those calls may be unimportant to me, maybe they're just calling out to an oracle to get some information. Right — non-authorized calls are not included in the payload. Wait, this is an important point I want to reiterate: what the user signs is only the authorized subset of the whole call + +[38:00] tree. It doesn't include any in-between or side calls, even if the authorized call is the top-level call. Authorization doesn't propagate anywhere: authorize means "authorize this and only this call in the current context" — it doesn't say anything about inner invocations. So the tree would not contain things that are not done on your behalf; whatever the contract calls into doesn't matter for the signature. As for the case where invoker is used currently: the invoker cannot be propagated today — you cannot forward the invoker to another contract — which, as we've actually seen from discussion, is kind of annoying and confusing, because you may sometimes want to propagate it. + +[39:00] But that's an aside. In this exact use case, when you don't propagate the top-level account anywhere, the signature payload will be almost exactly the same, modulo the different structure: instead of having a single contract and invocation, you put it into a vector, and that's pretty much it — you'll have a signature over a slightly different payload. So from a dApp perspective it will be marginally harder to build this payload, but it doesn't need to account for anything new; it just needs to use slightly different structures. And from the transaction-size-savings standpoint, we are keeping the invoker optimization to save on signatures: if the source account signs the whole transaction and the contract call, it is possible to mark it as the invoker and not + +[40:00] require a second signature. So the optimization part is there too. The only thing that changes from the contract-interface standpoint is that you need to pass the account explicitly. We may or may not introduce some sugar in the SDK to simplify that, so you don't really need to spell out what you want to authorize — but it's one line of code, and I don't think it's too big of an issue. On the other hand, it is now possible for the contract to do things on behalf of accounts in the sub-calls, which is not possible with the current invoker auth. + +[41:00] Yeah — I think an optimization —
if there is some way for us to retain an optimization where the source account could still be used to authorize — because I'm not really too concerned about that one line of code that a contract has to add; I'm more concerned about what the story is for a dApp or wallet developer. With the transaction, there's a number of things to sign — like a hardware wallet needing to sign the Stellar transaction as well as this blob, which is two signatures, so two rounds of signing to go through. Versus: if 99% of contract calls really only need that source account, then they really only have to sign the transaction once. I think you definitely want to preserve that. It is preserved: for + +[42:00] the single-signature case — when you pay for the transaction yourself and it calls one contract — it's definitely easier to just pass in a single signature, and that doesn't go anywhere; it will be possible to do everything with the single transaction signature. How frequently this will be used compared to the other flows we don't know yet, because we have discussed a lot of cases where, say, some third party pays the fees. But the case where the user just sends a transaction to do their business is covered, and with multi-operation transactions it's hopefully possible to sign all the operations at once while still staying compatible with this. + +[43:00] Are there any other questions, from anyone on the stage or in the audience? I have a minor question — maybe you can explain: in the prototype, why is authorize called "authorize" instead of something like "verify authorization"? I think that was done intentionally, but I don't recall the reason. No, I don't think there is a reason — "authorize" is like "authorize the current invocation", but if it's confusing we can use whatever name seems right; you could call it verify_authorization as well, or check — I don't know. + +[44:00] Okay. So I think the last thing we touched on is having to sign multiple payloads even for a simple case — I think that's a no-go; we need to either optimize for that or scrap this. At the end of the day people are using external signing applications, people are using hardware wallets, and we need to figure out, for the simple use case, how to sign a single payload. Okay — to restate, that's not going away: simple use cases will be possible with a single signature, but for the complex use cases you may end up needing to sign two things. + +[45:00] But do you mean the simple use case would be satisfied by just keeping the existing invoker auth capability, or do you mean we would introduce an optimization into this proposal, so that the contract still uses the same method — it still calls .authorize or .verify_authorization — and nothing changes; the authorization somehow comes from the source account? Yeah — basically, the whole driver for this proposal was to have a unified contract interface, and I find it problematic that you hard-code your contract to use the invoker if you don't use advanced auth in the current approach. That's bad, because if you want to use the contract in a different context you cannot — it's hard-coded to the invoker.
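A hedged sketch of the two credential shapes implied by this exchange — an explicit address signature versus the source-account flag that makes the single-signature case work. These names are illustrative rather than the final XDR:

```rust
// Two ways an authorization entry could be credentialed, per the discussion.
enum Credentials {
    // Invoker-style: the transaction source account authorizes. No extra
    // signature or account id is attached, because the source has already
    // signed the whole transaction, invocation tree included.
    SourceAccount,
    // Any other address signs the invocation-tree payload explicitly.
    Address {
        address: [u8; 32],
        nonce: u64,
        signature: Vec<u8>,
    },
}
```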
So the contract interface itself uses — + +[46:00] in auth-next it uses accounts and addresses, that abstraction, and one of the implementations of that abstract account — even though it's kind of built into the host — is just the invoker. It's just one implementation of the abstract account; contracts shouldn't worry about it, it's all implemented in the host, and it's in the current prototype as well. And by "simple use case" I mean the cases when there is only a single account that needs to authorize the operations; with multiple accounts, one of them can be the invoker, but the others would still need to sign the authorization payload. So hopefully, for the majority of cases, only a single signature is needed. Just to be on the same page: this is implemented in your host prototype for auth-next right now? Yeah, it's in the proto- + +[47:00] type right now. So this is an important thing — and the way that works is that only the top-level contracts, if they call .authorize, would see the source account as being authorized? Yeah, they actually will see that. Basically the tree is still in the transaction payload, but instead of attaching a signature you pass nothing, and instead of passing the account ID you pass a flag, because it's the invoker. It's similar to how we handle the invoker currently in advanced auth; it's just moved into the transaction structure itself. I'm asking because the invoker today has no depth. Yeah — now it has depth, because the transaction has the whole tree of the calls, so basically you are signing the tree + +[48:00] itself, which is why it is safe. Okay — so the way it's currently implemented, it's not really msg.sender; it sounds like it's working like transaction.origin from Ethereum. I'm not sure, but I think the key factor here is that you still explicitly authorize calls — it's not like you just give a contract blanket permission to call things on your behalf. It really just keeps the signature-verification part, because when you're signing the transaction or operation, you are also signing the whole authorized call stack payload that we've been talking about. So the wallet would need to show you that, hey, as part of this operation you + +[49:00] may be signing multiple token operations, for example — but from the signature standpoint it still needs to be signed only once. So it's basically like msg.sender, but better: msg.sender is contingent on the fact that you have signed the top-level invocation, while here you sign all the invocations that you need to authorize. Yep, okay. The reason I mentioned transaction.origin is that I'm trying to think whether there's anything we really need to worry about, because it feels a little bit like transaction.origin: you're the source account, you call this contract, it calls another contract, you tell that other contract that you're authorizing as the invoker, so that other contract is authorized + +[50:00] with the top-level source account. Yeah, I think it's probably fine, because you're right — it's reflected in what you sign; you still have to specify it. Right — there is really no way, no matter what kind of authorization you use,
to somehow hide the sub-contract call that performs authorization. There is really no way to hide some token withdrawal somewhere deep in the tree: it still requires a signature no matter what, or it will just fail because you haven't signed it. Any other questions from the stage or the audience? Okay. So I think Dima's done a great job of answering all these + +[51:00] questions. I think most of the remaining concerns are around what the experience is for the dApp developer, and to get insight into that I think we need to regroup and probably come up with some minimal prototypes that help us understand it. Hopefully that's something we can put together soon enough and discuss again. And if no one has anything else they want to ask or add, then I think we can call it a day. Maybe — if you have a few more minutes, I have one question about the nonce replay protection: if a transaction fails because the + +[52:00] call stack changed, will the nonces be consumed or not? Yeah, the nonce consumption is actually tricky; we may need to think more about that. In the current implementation the nonce is basically tied to the top-level authorized call, which may or may not be the right thing to do. It means that if something fails — okay, if the whole transaction fails, the ledger state is completely rolled back, so those nonces won't be consumed; if the top-level contract call succeeds, but some sub-contract call fails and + +[53:00] the top-level contract handles the failure gracefully, then the nonce would be consumed. Thank you. Okay — we may need to discuss this further, because this can be an issue: at some point the transaction fails, and then later it becomes valid again because the call stack is valid again, and someone can just front-run you and replay the transaction. Does that make sense? Yeah — transaction replay by front-running is kind of weird, because it shouldn't be an issue most of the time: someone would be doing what you wanted to do anyway. Yeah, but what you wanted to do at one point may not be what you want to do in the future. Say I want to transact with one exchange to spend my + +[54:00] money, then this fails, so I spend my money with another exchange — and then someone replays the previous transaction and spends my money again. We can discuss this more, but I think this brings up another topic, which is expiration, which I don't think we have discussed, and I didn't do anything about it in the proposal. It may be a generally useful feature for your signature to have an expiration, and that could even be a part of the standard payload, so that expired payloads could be discarded. But yeah, I think it's complementary to your question. Okay — the nonces could be ledger-bound or + +[55:00] time-bound; that may be a good idea. Okay, we can discuss this, and it would be nice to try to come up with some scenarios and think about how they could be abused. Thank you. So, we are approaching time — thank you so much, everyone, and Dima especially for prepping this and for answering the questions. We will regroup and update on Discord. Thank you all.
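To make the expiration idea raised at [54:00] concrete, here is a speculative sketch of a signed payload that is both nonce- and ledger-bound; every name here is hypothetical, since nothing like this was in the proposal at the time:

```rust
// Speculative sketch: bound the signed payload by ledger number so a
// once-failed authorization cannot be resurrected and replayed much later.
struct AuthPayload {
    tree_hash: [u8; 32],    // hash of the signed invocation tree
    nonce: u64,             // consumed when the authorized call succeeds
    expiration_ledger: u32, // payload invalid at or after this ledger
}

fn payload_is_live(p: &AuthPayload, current_ledger: u32, expected_nonce: u64) -> bool {
    // Both the replay check (nonce) and the staleness check (expiration)
    // must pass for the authorization to be usable.
    p.nonce == expected_nonce && current_ledger < p.expiration_ledger
}
```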
diff --git a/meetings/2023-02-23.mdx b/meetings/2023-02-23.mdx new file mode 100644 index 0000000000..cc72bbd412 --- /dev/null +++ b/meetings/2023-02-23.mdx @@ -0,0 +1,158 @@ +--- +title: "Rent Payments and Bump Primitives" +description: "Discussion of Soroban state rent, archival storage, and configuration upgrades, covering rent mechanics, security tradeoffs, and approaches to managing large network configuration data." +authors: + - dmytro-kozhevin + - garand-tyson + - graydon-hoare + - justin-rice + - nicolas-barry + - siddharth-suresh + - tomer-weller + - tsachi-herman +tags: [soroban, CAP-46, CAP-46-9] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session combined two major threads: how Soroban network configuration should scale beyond ledger-header limits, and how state rent and archival storage can bound ledger growth without breaking contract safety. The discussion emphasized that Soroban parameters (like metering and fees) are too large and flexible to live in the ledger header long term, motivating new upgrade mechanisms tied to dedicated ledger entries. + +The second half focused on state rent and archival mechanics. To prevent unbounded ledger growth, Soroban introduces rent-backed contract data that expires and moves to an archive. This enables pruning unused state while preserving recoverability, but introduces subtle security challenges around replay protection, versioning, and key collisions that must be addressed at the storage-model level. + +### Key Topics + +- Limitations of storing Soroban configuration (metering, fees, limits) directly in the ledger header. +- Three approaches to scalable configuration upgrades, with strong interest in using Soroban ledger entries as upgrade inputs. +- Validators voting on hashes of configuration sets, not raw data, to avoid SCP bloat. +- Using Soroban and ledger entries to make configuration proposals on-chain and auditable. +- Motivation for state rent: bounding ledger growth and incentivizing cleanup of unused data. +- Archival storage model using Merkle proofs, with validators storing only archive roots. +- Security risks from archival state, including nonce replay and restoring outdated data. +- Three Soroban storage classes: + - **Unique storage** for data that must never have multiple versions (e.g. nonces). + - **Recreatable storage** for mergeable or replaceable data (e.g. balances). + - **Temporary storage** for short-lived, non-recoverable data. +- Contract-level responsibility for resolving key collisions when restoring recreatable data. +- Role of preflight and RPC nodes in fetching archive proofs and preparing transactions. + +### Resources + +- [CAP-46: Soroban smart contracts framework](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046.md) +- [CAP-46-9: Scalable network configuration storage](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-09.md) + +
+ Video Transcript + +[00:00] Siddharth, do you want to give us a quick overview of the scalable configuration issue? Yeah, I can do that — can you hear me? Yes, I can hear you. Okay, perfect. So the issue at the moment is that all current network-related settings are stored in the ledger header, and this isn't a good place to store the Soroban settings, because they can be quite large — I think the current estimate for metering alone is one kilobyte. Before we go through the approaches, I'll mention that they all assume the use of dedicated ledger entries for storing settings, as described in [CAP-46-9](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-09.md), and that the settings will be created on protocol upgrades; the only difference between the approaches is how the settings themselves are upgraded. Approach one, which is described in [CAP-46-9](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-09.md), requires a mechanism for validators to introduce and distribute setting upgrades directly to the overlay; the validators will then vote on the hash of the flooded settings. + +[01:00] Approach two allows users to use Soroban to introduce possible setting upgrades into the system using arbitrary ledger entries, allowing validators to vote on one of these entries to overwrite an existing setting. We use Soroban here so that accurate fees will be charged for the larger operation and the ledger entries being introduced. There's still a question of how the user will interact with Soroban: we can add a new option to the invoke host function op, or rely on a smart contract itself to create these entries. Approach three is similar to approach two, except that a new Stellar classic operation will be used to create the entries that validators will vote on. To make sure this operation isn't spammed, the idea is to make the operation withdraw a large amount of XLM from the source account — but this idea needs some additional work to guarantee that the operation always has a sufficient balance for the withdrawal once it has been flooded, because you don't want the + +[02:00] operation to get flooded and then fail, since the network will have already flooded something that doesn't work. So that's the summary of the three approaches, and I'd like to hear opinions on all of these. Tomer, do you have any opinions? What would you prefer, Sid? I have a question first: can you tell us a little bit more about what is stored in these network configurations, concretely, and why it is important to preserve that data? So, for example, for + +[03:00] metering we have different parameters for how specific things get charged, and the structure will have, say, a bunch of u64s that indicate how specific operations should be charged; that is what Soroban will use when deciding how metering should be applied. But we want these settings to be configurable — that's why it's not just hard-coded in Soroban: the settings will be stored somewhere, and the validators should be able to upgrade them whenever they want, if all the validators agree. Does that make sense? Yes.
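As a rough illustration of the kind of structure being described — a bag of upgradable u64 cost parameters — consider the following sketch; the field names are invented for the example, and the real set is larger, hence the one-kilobyte estimate:

```rust
// Illustrative shape of a metering config entry, upgradable by validator
// vote without a protocol bump. Field names are hypothetical.
struct MeteringConfigEntry {
    wasm_instruction_cost: u64, // charge per guest WASM instruction
    ledger_read_cost: u64,      // charge per ledger entry read
    ledger_write_cost: u64,     // charge per ledger entry write
    // ... tens more parameters, which is why this can't live in the header
}
```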
It just doesn't sound like something that would change frequently between two subsequent ledgers, right? It's more something + +[04:00] like: when we go to upgrade to protocol version Y, then we need to make that update — am I right? Well, we don't want to tie the upgrading of the settings to protocol upgrades. For example, right now we can update the base reserve if we want to without updating the protocol, and we want this to work the same way. I agree that we won't update the settings very often — I wouldn't be surprised if we did it twice within a given protocol version. It's just generalizing what we're already doing in the ledger header to a much larger number of values — potentially an unbounded number, although it's not enormous: it's not thousands and thousands of entries, but probably tens, or possibly as many as a hundred. So does it make sense to do something + +[05:00] along the lines of having an optional field in the ledger that would hold that data, and in any subsequent ledger header just say, "hey, in ledger number XYZ we last updated these parameters"? I mean, what you're describing is how current upgrades work: you submit the upgrade to change a specific setting. The issue here is that the settings can be quite large, so sending a one-kilobyte XDR blob through the current mechanism wouldn't work — it actually wouldn't work at all with the current XDR, because we have a limit of, I think, 128 bytes per upgrade. That's why we're exploring a more scalable solution. + +[06:00] Sid, can I ask you a question about the other approaches — the ones that are not approach number one? In approach number two, for example, you've got two things in play: one of them is propagating — promulgating — the proposed changes, the set of options, and the other is a consensus vote. If people vote on two different config settings, what happens in approach two? There's still a consensus conflict-resolution problem, the same as you would have in the first one, where there's this arbitrary "take the largest upgrade" choice that the consensus layer makes. In approach two, what does the consensus layer do if there are two conflicting votes? It would be the same as it is today — like, if there are two different votes on, say, the base reserve. I don't actually know what conflict resolution is on the base + +[07:00] reserve right now. My point is that it currently happens in the consensus layer, and in approach two it sounds like you're trying to take it out of the consensus layer. So then — oh, I want to say that all the approaches are actually the same in terms of voting: in any case, validators would vote on the hash of the config upgrade set. The reason for that is that we don't want to bloat the SCP values: you have plenty of them, and since upgrades are a rare event, we don't want to carry huge values around forever. So voting always happens on the hash of the config upgrade set, and the default behavior in consensus is that if you don't agree with an upgrade, you vote against it, and then the upgrade doesn't happen. Yes, yeah.
But all of this happens very quickly during the voting round, and + +[08:00] I'm not concerned with the "I object to this vote" case — that's not likely to happen very often unless someone just doesn't configure their validator. It's the combining thing: if validator A arms upgrade set A and validator B arms upgrade set B, then when someone — the nominator or whoever — hears those two, they do a value combine, right, an SCP value combine? Yeah — these are not combinable. The vote for the config upgrades is a hash of the whole set of upgrades that need to be applied atomically, so any conflict just fails immediately. We don't try — like in option one, I guess it is possible to do something weird there, such as some + +[09:00] fuzzy matching of the upgrades: you say this operation should take a thousand units of gas and I say it should take a thousand and one, and maybe it doesn't matter in the end — but that introduces too much unnecessary surface for potential issues and bugs. Sure — I'm just saying that the document I'm reading right here says that the CAP picks the largest upgrade, which is arbitrary and requires some sort of conflict resolution, or it may want to only vote for a specific hash; that sounds to me like a conflict-resolution mechanism, and I'm just trying to clarify whether that's real. Maybe that's how it is — but I think in all approaches the idea for voting is the same: we vote on the hash of the whole upgrade set, and validators have to agree on this hash exactly. Then the question is only how we distribute the preimage of this hash. I do have a question on kind + +[10:00] of the implementation burden — between the different approaches, what's the low-hanging fruit, and what requires additional mechanics? I would say approach two should be the simplest. Correct me if I'm wrong, but it shouldn't be hard to add a function to Soroban to create these entries, and then any mechanism to interact with that function should be simple — either a new operation or the invoke host function op. I think using a smart contract is a little more complex, because there are more details around that. But in my opinion approach two is the simplest; with approach three, I think we have to be careful to make sure it's not spammable, + +[11:00] and approach one requires work in the overlay and the consensus layer. The reason approach two is the simplest is that it reuses the metering mechanics we have in Soroban — that's the only reason we're putting it in Soroban itself. I would say approach one is not hard either — there is machinery to take care of that — and in both approaches I don't think there is too much work anyway. The flooding problem is there for Soroban too; we figured that out discussing these concerns — for example, the transactions that install WASMs can be pretty big too, and we don't want to flood them for free. So in this sense it's maybe not a very easy problem to solve, but we need to solve it anyway.
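A minimal sketch of the hash-based voting just described: validators vote on a single hash of the whole serialized upgrade set, and only vote for hashes whose preimage they actually hold. `DefaultHasher` stands in for the real cryptographic hash over XDR; this is illustrative only:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hash of a whole config upgrade set; the set applies (or fails) atomically,
// so any mismatch anywhere means the hashes simply don't match.
fn upgrade_set_hash(serialized_entries: &[Vec<u8>]) -> u64 {
    let mut h = DefaultHasher::new();
    for entry in serialized_entries {
        entry.hash(&mut h);
    }
    h.finish()
}

// A validator only arms/votes for an upgrade set whose preimage it can fetch.
fn can_vote_for(proposed: u64, known_sets: &[Vec<Vec<u8>>]) -> bool {
    known_sets.iter().any(|set| upgrade_set_hash(set) == proposed)
}
```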
So option two just piggybacks on + +[12:00] that, while with option three I guess we need to change something in the classic mechanisms to make it work properly. So, from my perspective, option two is probably the best along the lines of the amount of additional work required. Could you speak to the complexity of the consensus change that's described in approach one? It says the consensus changes would be complex — is that because it involves the item fetcher, because we would have to have a second one — the way we do tx fetching? Yeah — I mean, again, I haven't dug into this point about consensus-layer complexity. It isn't complex per se — it literally reuses the tx set logic — but the tx set logic is arguably not very simple either. Yeah, replicating it — on one hand it's trivial + +[13:00] in that we copy something that exists, but we are copying something complex. So maybe it's really better to just use normal ledger mechanisms to put these entries into the ledger, and not bother with yet another flooding problem. The one question I have about that is that the consensus module has to actually be able to judge when it has a given hash: if there's a vote for hash X, it has to ask the ledger, "hey, does hash X actually exist?" — because I'm not going to vote on a config setting that I don't have in the ledger. So this is just moving that question from the item fetcher over to a ledger inquiry, and that's a synchronous ledger inquiry in the middle of consensus, right? Yes. Okay — I have no idea if this is a big problem or not; hopefully we're okay with that. That's kind of the risk in my mind, but maybe that's okay — I + +[14:00] think that's probably less pain. Yeah — the thing that is kind of annoying, and I mentioned this in the doc, with the approach that relies on consensus is that we would have to secure the upgrades in some way. Right now, because upgrades are small, they are not actually signed. Values in SCP are signed, so that if some validator goes rogue, you can blame whoever introduced the value that is spamming the validators and decide, okay, I'm going to remove this validator from my quorum set — that's the idea behind the signature. Upgrades are not part of this, because today they are small. So if we + +[15:00] start to make them bigger, we would have to sign them too. And the reason upgrades are not included in the signed payload is that there's an option for a validator to remove an upgrade from the value if they don't agree with it — so that you can still close ledgers with transactions even if there is no consensus on the actual upgrades. We didn't have so many of those problems until now: there's been broad agreement — for example, when you want to upgrade the network to a specific protocol version, there was no contention. I think we'll potentially have more contention in the future as we think of network parameters that may impact certain contracts — every time we change fees, I mean + +[16:00] the metering schedule, we may cause certain contracts to become more expensive.
Therefore there is maybe a higher chance of disagreement between validators. So I think the chance of conflict is higher post-Soroban, and that's why I think we may have more situations where some validators are going to say, "hey, I don't agree with this thing, so I'm going to drop that upgrade" — and until the conflict is resolved, you don't want to be in a situation where the network is not agreeing on basically anything. So that's why you can drop those upgrades, and right now they are not signed. We would have to either make them signed, + +[17:00] if we want to make them bigger, or just keep them small — and that's kind of option two. Hopefully that makes sense to people. I do have a question that relates to the user experience of changing the configuration. Right now validators just have Stellar Core commands to control this — is the idea to maintain the same experience across all of these different approaches, or do different approaches mandate different user experiences? I think I can answer that. It's easier to arm an upgrade for a hash — it doesn't require any changes, because a hash is small. If you needed to distribute the whole upgrade set, as in option one, that's doable, but it's annoying to do + +[18:00] as a core command: you'd maybe need to extend the command interface to take a file instead of just a GET request or something like that. So approaches two and three seem simpler here, because they don't require any such changes: we say, "hey, you want to upgrade to this set of configs with hash X; here is a link to the ledger entry that actually contains it". So I think the distribution is easier and it's easier to arm these upgrades, but it introduces transactions that need to be submitted. Yes, someone needs to submit a transaction, this is true. So the UX does change in all approaches, right? In approaches two and three you need to submit this operation, but in approach one the initial set of upgrades needs to be distributed as well, right? + +[19:00] I guess that would maybe be another Stellar Core command that the validator would run, and that's how it would be flooded. But to be honest, I do feel like approach one is an extension of the current experience, which is that the validators coordinate — validators always need to coordinate outside of the network regardless. They coordinate on a channel right now: Justin tells people, "hey, if you want to change this value, this is the command you use". So maybe the mechanics are going to be a bit different — we're going to say, "hey, here is an XDR blob, take a look at it in Stellar Lab or whatever, and this is the hash, vote on it" — but you don't introduce another step of submitting transactions to the network. Well, only one person needs to submit the + +[20:00] transaction; it's not like every validator does. Once the thing is populated, it's basically like today. Yeah — basically whoever initiates the upgrade should send the transaction, which I don't think is a huge issue,
because it's the thing they need to propagate anyway. Got it, okay, that makes sense. By the way, this closes a potential communication problem that we have today. Today, when we as SDF say, "hey, we are proposing to upgrade the network to version 19", like we did last year, people look at that message — and it's not signed; we don't publish it with digital signatures or anything. So in theory you + +[21:00] could imagine somebody impersonating us and getting people to vote for something else. I think the chance of that increases as you increase the complexity of the things people are voting on. And the benefit of having the entity that is championing a specific change submit it is that, because it is an actual transaction on the network, it's also signed by that entity — that's one of the extra free benefits of options two and three. So essentially configuration change proposals become on-chain activity. Yeah — and actually, that's something I wanted to expand on, because we didn't talk about it too much; in the doc we only mention it. One of the things option two opens the possibility for + +[22:00] in the future is to point the validators at ledger entries that are controlled by DAOs. There is a concept we discussed before — it's actually in a paper that was published a few years ago — the notion of governing and non-governing validators, and for certain things this may become more interesting in the context of DAOs that are not the tier-one validators, managing certain aspects, certain parameters of the network. And then the validators + +[23:00] that are interested in actively participating would of course not just blindly vote for those things — but other validators might, for parameters that are maybe not as coupled to network operations, or that maybe even need additional voting, and you want that to happen on chain, with more transparency. Got it. So it does sound like there are some added benefits to approach two, and it seems fairly simple from an implementation perspective. Do we have any concerns about — it's not immediately obvious that Stellar configuration changes should be proposed through Soroban; it feels a bit like we're using + +[24:00] Soroban just because it's there. I don't find it offensive, but can there be issues from it — things we want to do in pure Stellar that we're now introducing through Soroban? I think this ties into a subtopic I wanted to bring up, which is whether or not we should be separating Soroban operations into different operations in the transaction — because currently you have just one generic invoke host function that does a bunch of stuff, and with these changes it would do even more stuff.
So I'd say using Soroban is just an implementation detail here, and nothing prevents the upgrade in option two from being a separate operation. We may want to do this for all the operations anyway, just to have a single flat hierarchy of things you can do that's easy to discover — + +[25:00] currently it's pretty opaque in terms of how exactly transactions have been executed, and I think splitting would improve the UX. So the derived question here is whether we want to split the invoke host operation into multiple types of Soroban-invoking operations. Right — in this particular case I feel there's a strong case for making it a separate operation that is very explicitly named and carries its internal assumptions, so that people don't randomly submit it; and for all the existing operations there is the same consideration. Does anyone have opinions on this? Would it be accessible from the Soroban environment, in addition to being + +[26:00] accessible through this dedicated operation? No. I mean, the same thing annoys me about the current invoke host function transaction operation, and it's why I'm proposing this: we are not actually invoking a host function in the same way you can invoke it from a contract. They have diverged — the host functions we invoke from Stellar Core are not the same host functions that are invoked from contracts, so the name doesn't make too much sense. Even though the initial intent made sense, the implementation requirements ended up not quite consistent, and I'm not convinced we should keep the host-function framing; we could just be more explicit and name the operations and the ledger changes you are doing, without involving the host at all. Yeah — my opinion: + +[27:00] I think the original intent was to make calls uniform, and I think we may be paying a fairly high price to try to make two entry paths look the same — we're sort of orienting everything in order to make that one path pretty to use. Well, maybe — this is more related to whether we have a dedicated host function for all this stuff; I agree maybe we need to think about that. But I'm not actually convinced that it's necessary to have anything first class for those network upgrades inside Soroban. I think we should be able to point to basically any Soroban state — it should be considered valid as long as + +[28:00] we can decode it. Like I was saying in the doc, it's basically a byte array that we happen to be able to decode, because it's some XDR that we can understand at the network level; but from a Soroban point of view it's just a byte array, and if we do it like that, there's nothing special about any of this. Okay, this is an important point that I didn't understand. So from the approach-two perspective, we're not actually introducing changes to Soroban in order to facilitate this? Well, what is in the doc right now talks about having special ledger entries — you'd need a host function to manage those special entries. But I'm not —
when we originally discussed this, I didn't think we would + +[29:00] actually have dedicated ledger entries as input to the upgrade. I thought that, in the end — when the upgrades are actually applied, when those network settings are actually active — yes, there are special entries on the ledger; but in terms of how they get fed into the upgrade function, I don't think we need dedicated entries with that specific format as input. The doc was more about specifying that it's not a config setting entry — I guess it wasn't clear what it actually is, because I wasn't expecting that. What Nico's saying is probably the best way to do this: we specify that for an upgrade it'd be a byte array of a config setting entry, or a byte array of a vector of config setting entries, right? + +[30:00] I'm not sure I get the point, because then we don't have an entry — to me it would just be — I think with Nico's approach it'd just be a contract data entry. It's not necessarily a contract data entry — we don't have a contract, so it cannot be contract data. Well, you can — that's why I think, if in the future you want to have DAOs doing this, it's just a special case. In the first iteration, the way you would do it is to have a dummy contract that has state associated with it — a single value — and that allows you, as a user, to persist something kind of like a balance, + +[31:00] as for a token contract, but instead of a balance it's basically a binary blob. That's how you get it on the ledger, and the upgrade then just points to the specific ledger entry that contains the binary blob of interest. So what are you suggesting — would we have a config-specific host function? No, there's nothing specific to this. But then how would we vote? It has to be special in some way: since we are voting on a hash, there needs to be a straightforward way of finding this hash in the ledger. Yeah — that's why you can point to any arbitrary contract data, using the ledger key + +[32:00] and a hash. You'd vote on a ledger key — you would vote on a pair of ledger key and hash, probably. Maybe you can make it work with just a ledger key, but for now I think it's still rough; maybe we need to sketch it out a little. So basically you would vote on — I mean, that's quite inefficient; I guess the best you can do is a contract ID plus some data ID in a compact format, actually. So — I think this has been super informative. It sounds like there's a very strong bias towards approach number two here, and there are some details we need to figure out, like whether this is actually something that's special-cased + +[33:00] in Soroban or it's just a general ledger entry, with the validators, as part of the upgrade process, just pointing at that ledger entry.
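A sketch of the "point at a ledger entry" scheme that emerged here: the proposal lives on ledger as an opaque blob under some contract's key, and validators arm an upgrade naming the pair of ledger key and expected hash. All names are illustrative:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct ConfigUpgradeVote {
    ledger_key: Vec<u8>, // where the proposal blob lives (contract id + data key);
                         // the caller uses this to look the blob up
    blob_hash: u64,      // hash the blob must match at apply time
}

// Stand-in for a real cryptographic hash over the XDR blob.
fn blob_hash(blob: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    blob.hash(&mut h);
    h.finish()
}

// Apply the upgrade only if the referenced entry exists on ledger and hashes
// to exactly the value the validators voted on.
fn upgrade_applies(vote: &ConfigUpgradeVote, blob_on_ledger: Option<&[u8]>) -> bool {
    matches!(blob_on_ledger, Some(blob) if blob_hash(blob) == vote.blob_hash)
}
```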
So I think we can take this conversation offline about the details, and if needed we can bring it up in one of the next meetings. We have around 20 minutes left, so before we go on — Siddharth, do you think we have enough to keep prototyping here? Yeah, we should be good. Okay. So we don't have a lot of time — we have 20 minutes — and Garand is here today to start talking about state expiration, which is a very big, hairy topic. Garand's going to give a very brief overview today, and if we have time we'll do Q&A. I imagine we're going to be talking about this specific issue a few more times, so no big decisions will be made today. + +[34:00] Yeah — so, given the time, I just wanted to start out with the general motivation, then talk more about the interface, and we'll leave the implementation details to future conversations. Essentially, the issue we're trying to solve with archival state is the issue of unbounded ledger state size. Right now the number of ledger entries we have to store keeps growing, and there's not a strong incentive for users to delete entries. Additionally, a large amount of the entries that exist on the ledger are either outdated or just won't ever be used again — for instance, we have a lot of claimable balances that are more spam-like entries; they aren't used very often or won't be used at all, and they're taking up ledger space and increasing ledger bloat. So for healthy scaling patterns of the network, we want to be able to essentially cap ledger state size and not allow arbitrary growth, and to do that we want to delete entries that + +[35:00] aren't being used, but somehow keep the entries that are being used often. In order to do that, we want to implement this concept of rent. Every Soroban smart contract data entry will have a rent balance, which is some amount of XLM reserved for that entry to pay a rent fee. As the entry lives on the ledger, it will have a rent fee deducted from that rent balance, and whenever the entry's rent balance goes to zero, the entry is deleted from the ledger as if it never existed. Now, this obviously opens up some issues. Imagine you have a wallet or a balance that stores a lot of valuable tokens — you wouldn't want this entry to be permanently deleted and lost just because you forgot to pay rent and top up the rent balance. So even though the entry is deleted from the bucket list, what we do is take that entry and send it to a special + +[36:00] kind of node called an archiver node, which essentially stores all of these entries. If a user then wants to use an entry that has defaulted on rent and been sent to an archiver node, what they have to do is pay a fee, retrieve that entry from the archive node, and give that entry back to the validators. Once that fee has been paid, the validators will take the archived entry and put it back on the bucket list, and then the entry can be used as if it was never deleted. As for the implementation details of how this works: the archive is implemented in a Merkle-tree-like structure.
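A minimal sketch of the rent mechanic described above, assuming illustrative shapes rather than the real ledger entry types:

```rust
// Every Soroban data entry carries a rent balance that a periodic fee
// drains; at zero the entry leaves the ledger for the archive.
struct RentedEntry {
    key: Vec<u8>,
    value: Vec<u8>,
    rent_balance: i64, // stroops reserved to pay rent
}

/// Charge one period of rent; returns true if the entry defaulted and must
/// be evicted to an archiver node.
fn charge_rent(entry: &mut RentedEntry, fee: i64) -> bool {
    entry.rent_balance -= fee;
    entry.rent_balance <= 0
}
```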
In order to restore an entry from the archive and put it back on the ledger so it can be used again, you have to provide a proof of inclusion — a proof that the entry you say was archived actually does exist in the archive. This Merkle structure is very powerful, because it means validators are able to check that the entry you claim is in the archive is actually legitimate, + +[37:00] but the validators don't need to store the archive: all they need to do is store the Merkle root hash, and then they are able to validate proofs generated from the archive. So, at a high level, the validators themselves don't store any of the archive state and cannot produce these proofs, while the archiver nodes do store the entire archive state and have enough information to produce them. We envision the archive nodes serving a similar purpose to Horizon, where they support the validators but are not directly involved in consensus. From an interface perspective, this has a couple of issues we need to discuss, especially when it comes to security. Because we want the validators to store as little of this archive state as possible, they don't store any of the keys and they don't know what is or isn't in the archive. So imagine an example: say we have a token + +[38:00] contract, and you have a balance on that token contract, but you don't use it. Eventually the rent balance on that entry will go to zero, and the entry will be deleted from the ledger and stored in the archive. Once you default on rent, from the perspective of the validator it's as if the entry never existed — because the validator doesn't actually store the archive, it has no way of knowing that this entry used to be in the bucket list or that it is currently in the archive. This means that if the smart contract were to create another balance with that exact same address, this would be allowed: because the validator doesn't know that this key exists in the archive, it will generate new keys. So we have this issue of key collisions: if this process is repeated several times, it's possible to have multiple different versions of an entry with the exact same key existing simultaneously on the ledger and in the archive.
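The reason validators can stay this light is standard Merkle-proof verification: they keep only the archive's root hash and check inclusion proofs supplied with a restore. A compact sketch, with `DefaultHasher` standing in for a real cryptographic hash:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn hash_pair(left: u64, right: u64) -> u64 {
    let mut h = DefaultHasher::new();
    (left, right).hash(&mut h);
    h.finish()
}

fn hash_leaf(entry_bytes: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    entry_bytes.hash(&mut h);
    h.finish()
}

/// Each proof step is (sibling hash, whether the sibling sits on the left).
/// The validator recomputes the path from leaf to root and compares against
/// the only thing it stores: the archive root.
fn verify_inclusion(entry_bytes: &[u8], proof: &[(u64, bool)], root: u64) -> bool {
    let mut acc = hash_leaf(entry_bytes);
    for &(sibling, sibling_on_left) in proof {
        acc = if sibling_on_left { hash_pair(sibling, acc) } else { hash_pair(acc, sibling) };
    }
    acc == root
}
```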
+ +[39:00] Now, for some types of data these key collisions aren't that big of a deal. If you have a balance of some amount of a token under a given key, there can be multiple different balances: say you have 10 XLM in an account and that 10 XLM balance gets archived; then someone sends you another 5 XLM, and eventually that entry gets archived too, under your key. Now there are two entries in the archive that correspond to your balance — one with 10 XLM, one with 5 XLM. This isn't a big deal, because both of them are valid: you can restore the 5 XLM balance and spend it, then restore the 10 XLM and spend that, no problem — both balances are valid even though they have the same key. But for some data types that's not the case. Imagine a smart contract implementation that uses a nonce value: let's say we have something like USDC that uses a nonce to protect against double spends and make + +[40:00] sure that transactions can't be replayed, and say this nonce value is something non-zero like 10, and then it gets archived. Now, when this token contract needs to do something, it will see that there does not exist a nonce — because the contract is running on a validator and the validators don't store the archive, the contract has no way of knowing what's in the archive. So whenever the contract sees that a nonce doesn't exist on the ledger, instead of going to the archive and restoring the nonce with the correct value of 10, the contract will just create a new nonce with a value of zero. So now we have the correct nonce value, 10, in the archive, and this new zero nonce. Because this nonce is incorrect and is essentially shadowing a valid version of the nonce in the archive, it allows malicious users to execute a replay attack: they can take a + +[41:00] transaction that says "hey, if the nonce is equal to zero, this transaction is okay", and even though the correct nonce value in the archive is 10, the nonce value on the ledger is zero, so they can replay this transaction and maliciously reuse the nonce. So we have this challenging problem: for certain types of values, like nonces, we need to make sure that only a single version of the entry exists between the bucket list and the archive — which is hard, because the validators don't store the archive, so there's no way of checking directly whether a key exists in it. That's issue number one: things like nonces, where you want to make sure only one version of the entry exists. The second security issue is restoring an outdated version of an entry. Similarly, let's think of a token where you have some sort of KYC entry. Say you have a KYC + +[42:00] entry that allows a user to spend their tokens, and that KYC entry isn't touched for a while, so eventually it runs out of rent balance and gets sent to the archive. Now a new KYC entry is generated on the bucket list, but this KYC entry revokes access to those funds. Let's say this new KYC entry that revokes access isn't used for a while either, and it too falls into the archive. What a malicious user might do is go into the archive and, instead of restoring the most recent version that revokes access, restore the earlier version of the KYC entry — the one that gives the user access to spend those funds. By restoring an out-of-date entry, you can essentially perform a versioning attack: you take an out-of-date entry, restore it onto the bucket list, and you're able to pretend it is newer than the KYC entry that revoked access. So essentially we have two issues to solve here: first, we need to make sure you can only restore the + +[43:00] latest version of an entry, so you can't do this KYC rollback attack; and second, for certain types of data we need to make sure only one version exists between the archive and the bucket list. We don't need that uniqueness guarantee for all types of data — in the balance example I talked about earlier, it's completely fine to have multiple different balances.
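A toy illustration of the nonce-shadowing hazard just described — if missing state is interpreted as "nonce = 0", the archived value 10 is invisible and old messages verify again:

```rust
use std::collections::HashMap;

// The validator cannot see the archive, so a rent-defaulted entry simply
// looks absent, and the contract falls back to 0 -- shadowing the archived
// value (e.g. 10).
fn effective_nonce(live_state: &HashMap<Vec<u8>, u64>, key: &[u8]) -> u64 {
    live_state.get(key).copied().unwrap_or(0)
}

// An old message signed back when the nonce really was 0 passes again.
fn replay_accepted(signed_nonce: u64, live_state: &HashMap<Vec<u8>, u64>, key: &[u8]) -> bool {
    signed_nonce == effective_nonce(live_state, key)
}
```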
But for things like nonces you need to make sure you only have one. Because we have these two different requirements, we expose two different types of storage at the Soroban interface level; right now we're calling these unique storage and recreatable storage. The difference is that unique storage guarantees that, for whatever entry you have, there's only ever one copy. So in our nonce example, if you use unique storage to create a nonce value and that nonce value gets sent to the archive, then if you try to recreate the value, the function will panic, because it says, "hey, this is unique storage and an entry already + +[44:00] exists in the archive, so I'm not going to let you recreate it". Under the hood — and again, this is an implementation detail for later — we do that with a combination of proofs of inclusion and proofs of exclusion. For instance, whenever you create a unique data entry for the first time, you also need to provide a proof that this entry has never existed before. This proof needs to come from the archive nodes, because the validators don't store enough information to prove that something never existed; but if you provide this proof to the validators, they are able to check it and make sure it's legitimate. Because of this extra step for unique data — needing to prove the entry never existed — it's more expensive. Recreatable data doesn't have this guarantee, so for entries like nonces, which have security implications, you wouldn't want to use recreatable data: you could end up with multiple versions of your nonce, recreate nonces, and have security issues. However, you would want to use + +[45:00] recreatable data for something like a balance, which doesn't have this issue, because recreatable data is much cheaper: you don't need the security guarantees, you don't need to provide all these proofs of exclusion, and whenever you create a recreatable entry you don't need to prove that it never existed before. So essentially we provide these two classes of data so that users who need the security guarantee can use the more expensive and slower unique data, while entries that don't require those strict guarantees can use recreatable data. In addition to these two data types, we also introduce a third storage type called temporary storage, which does what the name sounds like: it's for entries that are meant to be temporary. If you have a temporary storage entry, whenever its rent balance goes to zero, instead of being sent to an archive it is permanently deleted — and because you don't need to worry about sending it to the archive, and there's no archival cost, temporary entries are the least expensive storage type.
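A sketch of the three storage classes and their create-time obligations as laid out above; names and shapes are illustrative, not the final interface:

```rust
enum StorageKind {
    Unique,      // one version ever; creation requires a proof of exclusion
    Recreatable, // keys may be re-minted; collisions resolved by the contract
    Temporary,   // deleted forever on rent default; never archived
}

fn create_entry(kind: &StorageKind, exclusion_proof: Option<&[u8]>) -> Result<(), &'static str> {
    match kind {
        // Unique data must prove the key never existed, via an archiver node.
        StorageKind::Unique if exclusion_proof.is_none() => {
            Err("unique storage: proof of exclusion required")
        }
        // Recreatable and temporary entries need no archive proof to create.
        _ => Ok(()),
    }
}
```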
And so these are the three data types and storage mediums we're trying to present to the end user. Now, the issue is that we have these security issues generated by the archive interface, and they can be very tricky to protect against. For instance, it's very tricky to think about examples like KYC rollback and nonce attacks in the context of this archive, especially since archival state is not only a new interface for Stellar, but a pretty new interface for blockchains in general. And + +[47:00] so we want to provide an interface and define use cases as clearly as possible for each type of storage, try to make this as seamless as possible for the end developer, and abstract away as much of the complexity of the archival interface as possible. So I think this is a pretty tricky problem, but that's the high-level issue I wanted to talk about today: the security issues that arise from this archival state, and then the three types of storage that we've tried to design so far. And so I'd be open to any questions, or if anyone has anything they'd like to talk about a little more in depth. Thanks for that; just a quick question. You were talking about unique storage and having to supply proofs. Does that mean that in order to create a unique storage entry, I need to have access to an archival node? Yeah. So essentially, how that works from the + +[48:00] perspective of the validator itself: yes, you do need access to an archiver node to produce these. And so what we're imagining the interface would look like is that RPC nodes kind of double as preflight nodes and archiver nodes. During the preflight process, if you generate a unique entry, or if you want to access an entry that's in the archive, the preflight node itself will carry archival data. So the preflight node will work similarly to how it generates footprints for read-write data accesses: it will retrieve the proofs that are required for your transaction and then give those proofs to the validator. And because the preflight node is generating and providing these proofs, whenever you actually go to apply that transaction on the validator, the proofs are available, because they've already been made in advance by the preflight node. + +[49:00] And just a bit here: for recreatable storage I don't need that, right? Yeah. So for recreatable storage it's cheaper, because you don't need to provide any proofs and there's no extra work to be done: if a key doesn't exist on the bucket list, you can just create that new key without any checks, no problem. Well, to make an amendment: it's not like nothing has to be done for recreatable storage, because if you want to benefit from the fact that it's recreatable, you need to write some code that allows you to restore entries. For example, if you have a token contract and your balances are recreatable, you probably want to provide a contract function that allows users to recreate their balance given a proof, basically. So both involve something to be done, but in the case of unique storage it would need to be done every time you create a new entry, while for recreatable storage + +[50:00] it's only
if your entry has got archived and you actually want to restore it, which hopefully shouldn't be the case too frequently. Yeah, kind of the difference is: in unique storage, if something exists in the archive, you must restore it; in recreatable storage, if the entry exists in the archive, you have the option of either restoring it or, if the restore would be too expensive or you don't want to restore it for some reason, just creating a new entry with the same key. But just to be clear, with regard to what Dima just said: if the ledger entry has been recreated with the same key, when I try to restore it, will it fail, or will it use some sort of custom merge capability? What's going to happen? Yeah. So when it comes to recreatable data, there's a function we define for unarchiving the latest version, and + +[51:00] essentially how this works is: whenever you restore unique data, it's automatically added to the bucket list immediately, because unique data is known to be unique, so you know you won't have a key collision between the archive and the bucket list, and you can just automatically add it. For recreatable storage this isn't true; there could be a key collision. And so whenever you restore a recreatable storage entry, it's not immediately added to the bucket list; instead this function, restore latest archived entry, just returns the unarchived version. And in the doc, if you look at the last two pages, there are two example functions showing how to restore recreatable data, two example implementations that a contract might use. But essentially, because of these key collisions, it's the responsibility of the smart contract implementation to resolve them. So for instance, what you can do is: after you get the + +[52:00] unarchived entry returned from this function, you can check the bucket list and see if there's a collision. If there's no collision, the smart contract can just write the key immediately, no problem. But if there is a collision, then what the smart contract might do is compare the values of the recently unarchived version and the bucket list version and pick one, discarding the other. Or, in the example of balances, there could be some sort of merge operation defined by the smart contract: if you have two balances, the unarchive behavior might be, okay, take the unarchived version, sum the balance that's live on the bucket list with the recently unarchived balance, and then just write the result, which is the sum of those two. And so, because there are different use cases depending on the contract, that's a contract implementation detail that the contract developer must think of.
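The collision-handling flow just described, with the balance-merge variant, might look roughly like this (plain Rust; the map stands in for the bucket list, and `restore_balance` is an invented contract-side helper, not the actual host function):

```rust
use std::collections::HashMap;

type Key = String;

/// Hypothetical contract-side collision handling for recreatable balance
/// entries: the host hands back the unarchived value, and the contract
/// decides how to reconcile it with any live entry under the same key.
fn restore_balance(
    live: &mut HashMap<Key, i128>, // stand-in for the bucket list
    key: Key,
    unarchived: i128, // value returned by the (hypothetical) restore function
) {
    match live.get(&key).copied() {
        // No collision: just write the restored entry.
        None => {
            live.insert(key, unarchived);
        }
        // Collision: for balances, summing the two versions is one
        // reasonable merge; other contracts might pick one or panic.
        Some(current) => {
            live.insert(key, current + unarchived);
        }
    }
}

fn main() {
    let mut live = HashMap::new();
    live.insert("balance:GALICE".to_string(), 25i128);
    restore_balance(&mut live, "balance:GALICE".to_string(), 100);
    assert_eq!(live["balance:GALICE"], 125); // merged rather than shadowed
}
```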
And is there a... I see you have a section on default implementation. What should be the default + +[53:00] implementation? Yes, I think the default implementation is just: if there's no key collision, i.e. if you want to restore something and there's no key on the bucket list, then you restore it and add it to the bucket list; if there is a collision, just panic and fail, because you don't want it to be a no-op. The unarchive function does delete the entry from the archive, right? You can't unarchive an entry twice; once you've unarchived it, it's gone from the archive forever, and so you don't want to lose entries if there's a key collision. So I think the safest option that preserves all the data is, for the default, if there's a collision, just panic, and then you either have to wait for the entry that's on the bucket list to be archived, or delete the entry yourself on the bucket list, and then the default unarchive will work. + +[54:00] Okay, I imagine there are a lot of questions; unfortunately, we are at time, so this discussion is going to carry on. Feel free to ask questions in live chat; Garand, if you can hang out there and answer any questions, that'll be great. And yeah, thank you all, this has been a very productive hour, and we'll see you all next week. + +
diff --git a/meetings/2023-03-02.mdx b/meetings/2023-03-02.mdx new file mode 100644 index 0000000000..eeacf02c19 --- /dev/null +++ b/meetings/2023-03-02.mdx @@ -0,0 +1,164 @@ +--- +title: "Auth Next: Design Review" +description: "Discussion of the Auth Next proposal plus related protocol work: Soroban fee-model considerations, ledger-state archiving design, and rent-bumping mechanics for on-chain storage." +authors: + - dmytro-kozhevin + - garand-tyson + - graydon-hoare + - justin-rice + - nicolas-barry + - orbitlens + - siddharth-suresh +tags: [soroban, CAP-46-7] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session reviews the Auth Next direction (account-abstraction style authorization with standardized payloads) alongside adjacent protocol concerns that impact developer UX and network sustainability. + +Conversation also digs into Soroban economics and storage lifecycle: fee-market design across multiple resource types, a tiered storage/archival interface for managing ledger growth, and practical ways to keep frequently-read state from expiring without forcing every read to become a write. + +This thread covers a design review of unified authorization for Soroban, focusing on the implications for the host environment and SDKs, alongside updates on fee-schedule work and how different resource markets shape the contract developer experience. It also includes a follow-up deep dive into state archiving, storage classes, incentives for proof serving, and emerging patterns for rent maintenance. + +### Key Topics + +- Auth Next overview: moving toward a single authorization model so contracts can delegate signature/policy verification and focus on business logic +- Fee model refresh: revisiting a draft “fee CAP” and aligning it with newer realities like preflight; framing fees across compute, ledger, bandwidth, and externalized ecosystem costs (indexers/streams/archives) +- Storage interface proposal (three classes): + - Unique storage: single authoritative version (never both live + archived); intended for “must-not-be-reinitialized” state like admin/control entries; higher creation cost due to exclusion guarantees + - Recreatable storage: cheaper creation by allowing archived key collisions (multiple historical versions can exist); discussed mainly as a scalability/UX trade-off for common patterns like token balances + - Temporary storage: short-lived entries that delete on expiry (no archive), favored for scoped approvals and time-bounded auth-related artifacts +- Staging plan: ship the interface + rent charging early, with archiving behaviors/proofs phased in as infrastructure lands (including discussion of a “null root hash” style bootstrap for exclusion proofs) +- Bloom filter debate: whether probabilistic “key exists” hints could reduce proof overhead, balanced against false positives, adversarial key targeting, resizing/migration, and consensus constraints +- Archive proof delivery models: + - Direct archiver endpoints (Horizon-like) vs. 
on-chain proof requests with reward-driven “archive miners” + - Key concern: proof theft / front-running when proofs are disclosed publicly, and what commit/reveal or other anti-stealing mechanisms might be required +- Rent bumping problem: how to extend rent for heavily-read state without rewriting immutable bucket-list entries; explored host-level bump operations, contract-managed thresholds, and who should bear the cost when state is “public goods” + +### Resources + +- [Auth Next design proposal (guiding document)](https://docs.google.com/document/d/1J-J3ClTUkrsLiJag906OH4hmNkZI3Jk6_Y9ZYt_psAI) +- [CAP-0046-07: Fee model in smart contracts](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-07.md) + +
+ Video Transcript + +[00:00] Okay, I think we have a couple of things on the agenda. The first one was around fees, to give a quick update on where we are, and then we have a follow-up discussion on the archiving work that Garand has been working on. Maybe I can start with fees, given that this is a quick thing. So, we have started to go back to that fee CAP. I was looking at the draft; it was first put together at the end of June last year, so it has been sitting, accumulating dust, for a long time. We're actually looking into this again, and we started by doing a couple of things. The first one + +[01:00] was around refreshing the CAP to reflect, as much as possible, where things were left off on the mailing list; there are still plenty of open questions, and we're going to go over that. The other thing was that we started a thread on Discord in the channels. So at this point it's more like: we need to go through and iterate on this before we can give more updates on what's next on the fee front. Basically, what we're realizing is that there are a lot of things that we didn't incorporate yet; for example, this was written before we had preflight in the picture, things like that. So + +[02:00] there will for sure be a few more changes that need to be made, but that's kind of where we are with this. I don't want everyone to spend more time on this; I'm just inviting people to follow the conversations and maybe help us make decisions on this one. Do we maybe want to give a quick high-level overview of what is there and what open questions we are trying to solve, or instead spend more time on the archival proposal? Yeah, good point. The type of + +[03:00] big issues I would say we need to converge on, on the fee front, is what type of experience we want to expose to contract developers when it comes to the different markets that exist in the system. So we have the different resource types: we have ledger space, we have compute when transactions execute, we have network bandwidth; in terms of fee markets, that's kind of it. We also have other things that are related more to external systems, like when people produce metadata that then gets consumed by systems like Horizon or Soroban RPC, or + +[04:00] things like the Stellar Expert indexer that use this kind of data stream. So making it so that people can't just spam those other systems is part of the scope for the fee schedule. And then, what did I forget? Oh yeah, we also have archives: what basically gets published, mandatorily, to those history archives. So we want to make sure that people don't use that as an alternative to S3 or other places where people can store data if they want to; the difference here is that those history archives are meant to be kept around forever after, so there are some constraints there. So the type of problem around the model for fees is: how do we make it so that we + +[05:00] can balance usability? So right
now, people are used to the classic system; they have a very simple way to think about things. Basically, you have a base fee for one operation; if your transaction contains more than one, you just multiply, and that's kind of your baseline way to think about fees. And if you want to get ahead of other transactions on the network for whatever reason, you just increase your fee, and that's kind of it. In Soroban, because of the competition between those different resource types, which are open-ended in terms of consumption and competition, we're going to need something a little bit better than that. I mean, in + +[06:00] places like Ethereum they have a version of what you can do with this: there is a single fee for everything, and there are proposals to maybe break it out into different resource types, but that's not in Ethereum yet; it is implemented in other cases, like Polkadot, I think. But yeah, that's why we are trying to get something usable. Okay, I think that's what I wanted to talk about on the fee front; just a heads up that it's coming. Next we have Garand, who I think wanted to give us a few more updates. Last time we started to talk about the + +[07:00] archive mechanism that allows us to save space on the ledger so that we can keep the network as cheap as possible, and I think there were a few interesting follow-up conversations that also happened after that on Discord. So, Garand, give us a few updates on what's going on there. Yeah. So I guess first I want to talk a little, or just have some time for questions, about the interface that we talked about last week. Kind of a high-level summary of what we went over last time: essentially, all Soroban data has this rent fee, and so every ledger, or periodically, you have to pay rent for keeping an entry live on the ledger. And then, whenever an entry runs out of its rent + +[08:00] balance, it gets evicted from the ledger and sent to the archive. And so with that interface we've exposed three different classes of storage; these three types of storage replace what is currently the storage layer, which is env.storage in your smart contract code. Of these three types of storage, first we have unique storage, where there's only ever one version of the entry: it exists on the bucket list, or there's a single version of that entry in the archive, but never both. This is useful for types of data that have security concerns, such as nonces or certain types of authorization, where there could be security risks if there are multiple versions of that entry that could be restored. Kind of the use case here is: if you think about implementing a nonce without this unique storage guarantee, you could find yourself with a + +[09:00] version one of the nonce in the archive with, say, value five, and then a version two of the nonce in the archive with a different value. And then you can imagine how a malicious user could restore those entries in such a way that your nonce value is out of date and not the correct value it should be. So that's unique data: it's more expensive, because you have to prove that something doesn't exist in the archive whenever you create something new.
And so there's a little bit of work that needs to be done, so it's the most expensive data type, but it's reserved for those security-sensitive, high-risk sorts of entries. Then, after unique storage, we have what's called recreatable storage, which is similar in that recreatable storage entries, whenever they run out of rent balance, also get sent to the archive. The only difference is that recreatable storage might have different versions in the archive; multiple versions can exist at the same time. The reason for this is that whenever you create a recreatable storage entry, you don't check the archive to see whether something already exists there. And so say you have + +[10:00] something like a balance that got archived, and then you go to create a new version of it after your old key got archived: in recreatable storage you don't check the archive, so you just create a new entry with the exact same key, and you get this key collision. So it's a little cheaper than unique storage, because you don't have to check the archive and actually show that this entry is unique; there could be multiple versions of it. So it's cheaper, but it's not appropriate for security-sensitive types such as nonces or auth, where you don't want multiple versions. So that's unique storage and recreatable storage, both of which can be archived. And then the final type of storage is called temporary storage, and this is for short-lived entries. Whenever a temporary storage entry runs out of rent, it just gets deleted; it doesn't get sent to the archive. And so temporary storage is inappropriate for sensitive data you want to keep around, like user balances, but it can be useful for data types that either don't need to live very long, like a short-term authorization to let an + +[11:00] address spend your funds, for instance, or for data types that can be easily recreated if they get deleted, such as a payment path or something like that. And so I think first I just want to open up the floor for questions and talk about this interface, and in particular this three-tiered approach of having three different classes of storage, because I know it's a little controversial and it's definitely a bit more complex than the current interface. Just wondering if there are any questions. I do have a question, if I can go for it. So, in my sense this is actually a very good design; I like it. I'm wondering, in terms of staging this work: it's quite complicated and involved, and some quantity of it is going to depend on + +[12:00] some pretty big components being built out, in terms of the archivers, which is fine, and I think we can do some staging, in the sense of deploying versions of Soroban that have the interface, but where some of it is just defined to do nothing at this point, or that sort of thing. I'm a little bit concerned about the unique storage one, because unique, right off the bat, has to have these exclusion proofs in order to do any writes. Is that correct? Yes. I think what we could do is still do a staging process, right? What this would probably look like in practice is: when we launch Soroban, we won't have the archive built out. And so the current plan for v0 at launch is to have the interface set, to expose the unique, recreatable, and temporary storage entry types to the user, and then to charge rent.
And so the thing that won't be + +[13:00] there, though, is that whenever your rent balance goes to zero, you won't get deleted and you won't get sent to the archive, because the archive won't be built yet. And so I think for unique data in particular, we still should launch unique data and recreatable storage just so that contracts can have the correct paradigm written at launch; but what we can do at the implementation level is just special-case it, so that before we have these proofs we can say, essentially, creating unique entries does not require a proof of exclusion until we actually provide an interface for those proofs of exclusion. That's exactly the part I was asking about: do you think we would just make it an optional field at first, and then when we rev the protocol, the optional field has to have a value in it, and we'll define what that value is in the future? Well, we could do that. Actually, now that I'm thinking about it, what we could do is just zero-initialize the root hash and say the root archive hash is null, and then proofs of exclusion become + +[14:00] trivial, right? Because if your hash is null, then you're guaranteed that the archive is empty. And so I actually think we can provide proofs of exclusion on day zero if we just define the null hash; the proof will always be trivial when your root hash is null. Okay. The other thing I'm thinking is that you want to use uniques for nonces, and it seems like we update nonces quite a lot, so I'm a little bit concerned about an expensive operation that involves constructing a proof of exclusion having to accompany every nonce update. Certainly I wonder about the nonces that are maintained by the auth system; maybe Dima could speak to that. Yeah, I can, if you don't mind, Garand. So I think Garand is using the nonce example just as an example of why this problem + +[15:00] matters. For the built-in nonces, at least, we decided to move forward with a temporary nonce approach: nonces and signatures basically have some time boundaries and are stored in temporary ledger entries, so they don't even need rent. And I would say this should be the preferred approach; I have reiterated this several times in the discussions here on Discord. With the existence of temporary storage, it's a really good idea to try to benefit from it and design for it, like in this case of nonces, where we would otherwise need to bump the entry multiple times, which is not super convenient. I think the main use case for unique storage is really admin data: you really don't want your admin entry to be taken over by someone just + +[16:00] because it has expired. If you have a token contract that you initialized once a year ago and have never touched since, you don't want to wake up after a year, once the rent has expired, and find that someone else has re-initialized it just because the entry expired. So I feel like this is the main use case, and these are really pretty cold entries if you think about it. Yeah, thinking about this a bit, I sort of retract my concern, because I think you're right.
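A minimal sketch of the time-bounded nonce idea Dima describes above (plain Rust; the types, and the rule that a consumed-nonce entry's expiry is propagated from the signature's own time bound, are illustrative assumptions rather than the final auth design):

```rust
use std::collections::HashMap;

/// Hypothetical time-bounded signature payload.
struct SignedPayload {
    nonce: u64,
    max_ledger: u32, // the signature is invalid after this ledger
}

/// Consumed-nonce records live in *temporary* storage: each record's expiry
/// is propagated from the signature's time bound, so the record can only be
/// deleted once the signature it guards is unreplayable anyway.
fn try_consume(
    used: &mut HashMap<u64, u32>, // nonce -> expiration ledger (stand-in for temp entries)
    payload: &SignedPayload,
    current_ledger: u32,
) -> bool {
    if current_ledger > payload.max_ledger {
        return false; // signature is past its own time bound
    }
    if used.contains_key(&payload.nonce) {
        return false; // temporary entry still live: this is a replay
    }
    used.insert(payload.nonce, payload.max_ledger);
    true
}

fn main() {
    let mut used = HashMap::new();
    let p = SignedPayload { nonce: 7, max_ledger: 1_000 };
    assert!(try_consume(&mut used, &p, 990)); // first use succeeds
    assert!(!try_consume(&mut used, &p, 991)); // replay rejected
    // A payload presented after its time bound fails regardless of the entry.
    assert!(!try_consume(&mut used, &SignedPayload { nonce: 8, max_ledger: 1_000 }, 1_001));
}
```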
If the system has a well-defined notion of temporary storage with time limits on it, then you just time-bound anything that uses it: you propagate those time bounds to the things that use that storage, so that they become invalid at the same moment. So I can see that being quite a viable approach; I like that. That's good, thank you. I also want to make one additional clarification: I think in your original question you asked whether you have to provide a proof of exclusion every time you update the entry, and that's not true. You only have to provide the proof at creation time, + +[17:00] and then of course you have to provide proofs if you run out of rent and get sent to the archive. But once it's actually live on the bucket list, it behaves just as if you're modifying any other entry; the proofs only apply if it's not on the bucket list, or if you're creating something for the first time. So, again, this is just an example, but if it's a nonce that's regularly used, it wouldn't matter: it would be very cheap and efficient, because it would never reach the archive. Okay. Oh, I have one other very minor question, and this is more of a design, time-to-hit-the-thesaurus thing: we already have something in the system called an archive. I just feel like we've got to use a different word for this, because it's going to collide with a lot of things that are already referred to as archives. Yeah, maybe something like deep storage; I don't know what we're going to call it, but maybe not something starting with "deep state", that might be tough. A graveyard? Yeah. Cool. So I guess, any other questions specifically about the recreatable, temporary, and unique storage + +[18:00] interface before we move on? Yes, I'd like to ask: is there any reason why you don't use something like a bloom filter to check whether the entry already exists in the archive? Because given that we had a bloom filter of all entries in the archive, we could probably avoid having multiple versions of archived entries, and I think having only one version in the archive, and preventing users from recreating entries that already exist in the archive, makes a lot of sense. Yeah. So we definitely investigated this + +[19:00] a lot, and we tried to find a way, if there is one, for validators to store keys, or to at least have some knowledge of what's in the archive. I think there are a couple of issues there. First, the goal of the archival state is to bound the amount of storage that validators need to store, and so if they need to store a key, even though that's less than storing the entire data entry, that's still unbounded storage. So that's issue number one: you'd have to have a set of keys that grows without bound, and that's not a great solution, especially since for Soroban data types there are a lot of instances where the key is actually as big as or larger than the value. You can think: the keys are 32 bytes, and I'm not super in depth on the current implementation, so correct me if I'm wrong, but my understanding is keys are 32 bytes, while the value could be something as small as an integer, which is only like four bytes. And so I think you're not getting as good cost savings as you'd expect if you just store the keys as a set. Now, to your bloom filter + +[20:00] question, or whether there's a way to store the keys in a more efficient manner: the issue with bloom filters in particular is that it's very difficult to resize a bloom filter, and so
if you say, okay, I have unbounded state, and we're going to pick a bloom filter that's one gigabyte large, but in 10 years you need a larger bloom filter because you're getting a lot of collisions, it's impossible to just resize that bloom filter without having all the values: whenever you change the size of the bloom filter, you have to rehash everything you've added to it, which would not be possible for the validators, because they've thrown all those values away. So essentially, to resize your bloom filter, you'd have to replay history from the beginning of time in order to resurface all the values that need to be in it. Additionally, there are still issues with bloom filters because they're probabilistic in nature, so you would still sometimes have + +[21:00] false positives. They don't return false negatives, I don't think, but there would be certain keys that, just based on the probabilistic nature of the bloom filter, the validators would think are in the archive even though they weren't. And so that would also be an issue: based on whatever your hash function is, there would be certain keys that would essentially be impossible to create, because the bloom filter thinks they already exist when they really don't. So that addresses both the bloom filter question in particular and the issue in general. I'm not entirely sure that the resizing issue is such a huge problem, because you have an archive where you have all the keys, so once in, let's say, 10 years, it's possible to organize a resizing using the archives as the source of all the + +[22:00] existing keys, and run it as a maintenance operation for all the validators. As for the size of the bloom filter: I just checked, and one billion entries with a reasonable false-positive rate, like 0.1 percent, takes about two and a half gigabytes. So it's not that much, and it would be enough for the first billion entries; I think it's a reasonable trade-off to avoid some other complexities. As for the false-positive case: since you have + +[23:00] the Merkle tree, or some other structure if that's not settled yet, you can probably check whether the archive contains the given key, whether it can be resurrected, or something like this. Maybe I'm missing something, because I haven't thought about this for quite a long time, but do you still think that using bloom filters or other probabilistic structures won't help prevent collisions of entries in archives? Maybe there is some other option, because this point looks like one of the most controversial things about + +[24:00] the archives to me. Yeah, this is an interesting idea, actually. I think you're suggesting we could perhaps use the bloom filter as almost a caching layer for efficiency, and then use the proofs as a backend in case you get a false positive. I'm still not 100% sure the false-positive case can be avoided, though, because it would be very frustrating from a user standpoint: say I have a key, or there's some deterministic way to define keys, like my address is the input that generates the keys for entries associated with my account, as in a token contract.
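For reference, the size estimate quoted a moment ago follows from the standard bloom filter sizing formula m = -n * ln(p) / (ln 2)^2. Here is a quick back-of-the-envelope check in plain Rust (purely illustrative, not part of any proposal):

```rust
/// Optimal bloom filter bit count for n items at false-positive rate p:
/// m = -n * ln(p) / (ln 2)^2
fn bloom_bits(n: f64, p: f64) -> f64 {
    -n * p.ln() / (2f64.ln().powi(2))
}

fn main() {
    let n = 1.0e9; // one billion entries
    let p = 0.001; // 0.1% false-positive rate
    let bits = bloom_bits(n, p);
    let gib = bits / 8.0 / (1024.0 * 1024.0 * 1024.0);
    // Roughly 14.4 bits per key, i.e. on the order of a couple of gigabytes
    // for a billion keys -- the same ballpark as the estimate quoted above.
    println!("{:.1} bits/key, {:.2} GiB total", bits / n, gib);
}
```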
If one of those keys is a false positive, then you just won't be able to create any of the entries derived from the invoker's address, right? And so I don't know if there's a way, + +[25:00] when you get a false positive, to somehow check the archive and say, oh, actually that was a false positive, it really doesn't exist, I promise. I'm not sure there's a way to get around that case, because even with a reasonable false-positive rate like 0.1 percent, that still means that one out of a thousand keys (or maybe ten thousand, I might be off by a zero) will look like it's in the archive when it really isn't, which means you're not allowed to create one out of every thousand keys, and I feel that could be a really significant issue from the user-interface perspective. Do you think that the time for checking the existence of a key over the Merkle tree, or Merkle trie, or whatever structure you plan to use for archive proofs, is + +[26:00] really huge, like seconds, minutes, or even more? Because if it's relatively small, then you'd be checking existence only for conflicting entries, and you'd be checking those only when someone tries to create an already-existing entry, which shouldn't be that frequent an operation. So what are the time requirements for checking against the trie? Also, one clarification here: you can't just check against the archive directly, because the archivers don't participate in consensus + +[27:00] and are not validators; they are off-chain. And so you can't just search through the tree brute-force and trust that the contents are correct; there has to be some way for the archivers to give a proof to the validators that the validators can then verify themselves. I think that's part of the reason we're using this trie model, where we can get both proofs of exclusion and proofs of inclusion. The difference between our case and the case you're describing is that in your case you can trust the archive; in our case we can't trust the archive, so we have to have some proof via a hash or something. That said, I think this bloom filter approach could be interesting: if we use the bloom filter, and then, when there's a collision, say, even though there's a collision in the bloom filter, I'm going to go get a proof of exclusion, that would perhaps make it more efficient and mean that in most cases + +[28:00] you can get away with creating entries without a proof of exclusion. But I'm still not sure what that would look like. I guess maybe what we could do is: you create your entry, you check the bloom filter, and if you get a hit on the bloom filter, then you go try to get a proof of exclusion, and if there's a valid proof of exclusion on chain, that can override the bloom filter. That might be an interesting optimization. I don't want to steal more time on this question here; I definitely need to do more research on this, and we should probably get back to it later. Thanks for the explanation. Yeah, I think it's definitely an interesting idea that's worth pursuing. I guess one question I would ask is: + +[29:00] under this proposal with the bloom filter and whatnot, this would make all data unique data, and so I'm wondering
if there are use cases where a user might actually want data to be recreatable. Thinking of the balance use case: is there ever a scenario where it's actually an advantage to have multiple different versions, as opposed to a strict one-unique-key guarantee across the archive and the bucket list? For instance, in the case of balances, where multiple versions of the balance can just be summed together, is that an advantage, and do we still want to expose a recreatable storage interface so creation can be even faster, without this bloom filter check, proof of exclusion, and all that sort of stuff? Or is a strict one-key, no-collisions guarantee powerful enough that we shouldn't expose this interface at all? + +[30:00] To tell the truth, from my experience there are probably no known cases where you need several versions of the same ledger entry; in most cases it's even a destructive problem, because recreating an account or something else may be a huge problem. I'd say that the use case for optionally recreating or maintaining several versions of the same ledger entry is rare, and I haven't seen any use cases for it. And just one note: from the contract- + +[31:00] to-host interface standpoint, there is no versioning in it anyway, aside from what we added to work around some issues in the current approach. In general, the interface is that you put something into storage and it will stay there, quote-unquote, forever, or you put it into temp storage and it will be removed after a certain time period. So even from the interface standpoint there is really no case for these multiple entry versions, and I think the main reason we're thinking of having recreatable storage is just to save some time and cost, to be able to quickly create entries without proofs of exclusion. But it's more like an implementation detail that contract writers would unfortunately need to worry about in some cases, versus something + +[32:00] that can be used as a feature by the contract writer: if you need any sort of versioning, you can build it yourself by putting a version in the key, or whatnot. So I'm pretty sure there is no legitimate case for recreatable storage beyond the scalability requirements. Yeah, maybe I can add to that the reason:
But the false positive rate is the percentage of the time, that you will need to provide a proof of exclusion for creating new entry. And so let's just I guess the trade-off is everything is unique and the interface is easier. But one out of a thousand Creations are very slow and require process of create precept explosion whereas. If we have unique data and recreatable data. Then the unique data is guaranteed to always be fast. And so I guess the trade-off is do we want all data to be fast most of the time or all data creation to be fast most of the time and sometimes to be really slow for the easier user interface or have a more complex user interface where one type of data is always slow to create and one type of data is guaranteed always fast to create I guess, that's the fundamental trade-off at least in my mind + +[35:00] and, that sounds about right like the thing about the blue tilt the other is, that. If in the context of like a balance right the ID the key right of, that balance is actually deterministic. So as it's deterministic it becomes kind of attackable unless we can come up with like a cryptographic you know Broomfield of sorts it's very easy to basically you know cause certain keys to be to have complex in the boom filter. And then you're kind of back to, that you know even, though it's one in a thousand you know. If you're the one, that is always hit by the one it kind of sucks what. If we utilize B tree index or some other index or like database actually doing this + +[36:00] and besides, that the index itself can reside on the disk and the Fast Cash can be implemented using the bloom filter and the actual track will be carried over the index for example B3 index database handle this I mean the issue with an index is we're getting to, that issue where. If we have any deterministic index like, that we need to store the keys right. And then we have, that same issue of unbalanced State growth and especially restore upon data where the keys can sometimes be significantly larger to the value. So I think any like deterministic data structure we can't get back into, that issue of we have unbalanced State versus defeats the purpose of the archive in the first place yeah I just want to remind way basically we still need to maintain + +[37:00] consensus and we cannot just like randomly update boom shooter for example right it has to be a part of consensus. So it would need to come up with some way to Hash it quickly and add it to the CP values and make sure it's archived properly you know significant amount of work and I mean you could say, that the keys are stroke in The Ledger forever. And then you build some sort of index on them blockchain. But then you know yeah it kind of no longer fulfills the requirement of having limited Venture state cross, which we wanted to fulfill. So it's kind of an issue and yeah, that for what source like it was my first city as well like what. If we just throw keys in The Ledger. But yeah, that unfortunately kind of doesn't scale as well yeah. So I think the Bloom + +[38:00] filter with a proof of exclusion fallback for false positives it's an interesting idea I think we probably have some technical homework to do there. So I think it's all right for everyone to move on to the second topic I'm not hearing any objection Celtic as yes. So kind of slipping away from the user interface. Now talking about how the archiver interface will be set up. 
And so currently there are kind of two proposals. One is where we have an archive interface that functions similar to how Horizon functions now: you go to a specific archiver, you have some URL endpoint, and you query that endpoint with the keys you want unarchived. So this is a model similar to what we have today with Horizon. There are pros and cons; among the cons, you have to have a personal relationship with, or at least know of, an archiver to go to, and it's not super clear how we + +[39:00] could incentivize or monetize this sort of interface. Perhaps you would have to pay a monthly subscription to the archiver, or perhaps you'd have some relationship where you pay your archiver per entry lookup or something like that, but it's not super clear how we incentivize people to actually run archivers in this setup. The second scenario is what I'm referring to as archive miners, kind of stealing the miner terminology from Bitcoin. Essentially, how this would work is: instead of querying an archiver directly, whenever you need a proof, you make a request on chain. Now, this could either be implemented at the protocol level, where a proof request is an operation, or it might be implemented by a first-party smart contract, but that's not really important right now. Essentially, you would just submit an operation that requests an archival proof, and by submitting this + +[40:00] operation you'd emit meta information that archivers could then ingest, and that's how the archivers would know about the requests. As part of this operation you would specify the key you want proven, the type of proof (proof of inclusion or proof of exclusion), and also a reward, and this reward would be variable, at the user's discretion. This request would go on chain and the meta would be emitted; an archiver would then ingest this meta and pick which requests it wants to service, so it could service the request with the highest reward first. It constructs the proof with the information it stores and submits the proof in another operation, and the proof itself becomes an entry on the ledger. Once that proof has been submitted and validated, and the proof is on ledger, the proof request is + +[41:00] deleted and the reward is given to whoever submitted the correct archival proof first. I think this is a better interface, because it has a clearly defined incentive structure and doesn't require any personal relationship with an archiver: you don't have to have a URL that you talk to, or a relationship with some company that you pay a monthly subscription fee to in order to access the archive. It also allows archivers to freely enter and exit the network as they please, and by having a variable reward that the user can set, you also get essentially built-in supply-and-demand dynamics, where the price fluctuates over time depending on how many people want to restore archived entries and how many archivers want to service them. So, generally speaking, what are your thoughts on the two approaches, with the leading approach being this one: submitting proof requests to the + +[42:00] chain, having archivers read the chain, and then submitting the proofs?
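A rough sketch of what such an on-chain proof request and its fulfillment might carry (hypothetical types only; neither the operation shape nor these names are pinned down by the discussion):

```rust
/// Hypothetical on-chain request for an archival proof.
#[derive(Debug)]
enum ProofKind {
    Inclusion, // "this archived entry exists, with this value"
    Exclusion, // "no entry with this key was ever archived"
}

struct ProofRequest {
    key: Vec<u8>,    // ledger key the proof is about
    kind: ProofKind, // what the requester needs proven
    reward: i64,     // variable, user-set; this creates the fee market
}

/// An archive miner's answer: the proof itself becomes a ledger entry, the
/// request is deleted, and the reward goes to the first valid submitter.
struct ProofSubmission {
    request_key: Vec<u8>,
    proof: Vec<u8>,      // Merkle-style proof that validators can verify
    submitter: [u8; 32], // account that receives the reward
}

fn main() {
    // A user posts a request; miners watching the transaction meta race to
    // service the highest-reward requests first.
    let req = ProofRequest {
        key: b"contract-data-key".to_vec(),
        kind: ProofKind::Exclusion,
        reward: 100,
    };
    let answer = ProofSubmission {
        request_key: req.key.clone(),
        proof: Vec::new(), // placeholder; a real proof comes from the archive
        submitter: [0u8; 32],
    };
    println!("{:?} proof requested, reward {}; {} proof bytes submitted",
        req.kind, req.reward, answer.proof.len());
}
```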
How do we feel about that? Anyone want to speak? Let's see. So, the thing I'm thinking of in terms of a minimal viable product: having a way to talk directly to archivers is kind of the foolproof approach. The approach where you use the on-chain state, I think there is good potential there, but I think it's going to be fairly tricky to get right. The reason is that you're + +[43:00] not creating intrinsic value tied to certain transactions that are being published on the overlay, and therefore a bot of sorts can look at this overlay traffic, front-run it, take the data, that is, the preimage, the proof, from the archiver that actually did the work, and benefit from the reward. So I think there is an interesting problem there to solve: how can you safely disclose the proof to the network without being front-run? Interesting. What about having the entities that submit it sign it? Well, the issue with signing is: how does the + +[44:00] bot not just take your proof, sign it with its own address, and submit it as if it were the originator? Yeah, that's fair. I'm not sure, but I think there might be ways to do it. Maybe it's a multi-step thing, where you first disclose, let's say, the hash of the proof before you actually disclose the proof; then, if it's a contract that's doing that work, we can basically give the first one the benefit. At the same time, maybe a bot can still interfere, and it becomes + +[45:00] kind of a cat-and-mouse game of how to do this safely. Yeah, I think in the current framing, assuming users are super nice people not trying to game the system, the front-running case I thought about was just: if the same proofs land in the same block, just randomly pick one. But I didn't think about this proof-stealing case, front-running by stealing proofs, so this is definitely an interesting issue to think about. But I still like the model where you don't have to have a relationship with the archiver, for a couple of reasons. First, especially if the archive system runs for a long time, it's likely that archivers will not store the entire archive; I think it's a good idea to let the archiver pick and choose how much or + +[46:00] how little of the archive history it wants to store. And so I can see an archiver that only stores the last five years, one that stores the last 10 years, and then a more expensive one that stores the last 50 years, for instance. If you have to individually query an archiver, you get this weird interface where for things that are three years old you can maybe query a cheap archiver, and for things that are older you have to change your URL or something like that to target a different archiver with more history. And so I think there are still some interface issues with having to talk to the archiver directly.
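One shape the multi-step disclosure idea floated above could take is a classic commit/reveal: commit to a hash of the proof plus the submitter's address first, then reveal the proof in a later ledger. This toy sketch uses Rust's non-cryptographic default hasher purely for illustration (a real scheme would use something like SHA-256), and, as the discussion notes, it leaves the DoS question open:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Toy commitment over (proof bytes, submitter address). In practice this
/// would be a cryptographic hash, not DefaultHasher.
fn commit(proof: &[u8], submitter: &[u8; 32]) -> u64 {
    let mut h = DefaultHasher::new();
    proof.hash(&mut h);
    submitter.hash(&mut h);
    h.finish()
}

fn main() {
    let proof = b"merkle-proof-bytes".to_vec();
    let honest = [1u8; 32];
    let thief = [2u8; 32];

    // Ledger N: the archiver posts only the commitment.
    let committed = commit(&proof, &honest);

    // Ledger N+1: the proof is revealed. A thief who copies the revealed
    // proof cannot produce a matching earlier commitment for their address.
    assert_eq!(commit(&proof, &honest), committed);
    assert_ne!(commit(&proof, &thief), committed);
}
```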
But, at least off the top of my head, I don't have a great solution to the stealing-proofs thing; I think that's the challenge. Yeah, I was going to say, I think we should definitely be looking into those mechanisms that are a little more distributed from a discovery point of view. + +[47:00] I think all it means is that we need the proper semantics on the network to allow for doing this. For example, the property that proofs are usable independently of actually using the entries: I think this is a key property we need to have, because one of the earlier drafts required people to submit proofs in the same transaction where they were going to actually restore the entry, and obviously that would not enable these types of scenarios. Yeah, I definitely like having the proofs themselves be ledger + +[48:00] entries; I just think we need to find the best way to make sure the system isn't getting gamed. Yeah, because there's not a clear solution. You could, say, before you submit the proof, submit the hash to say, hey, I was here first, and then submit the proof in the next ledger. But then you open yourself up to DoS attacks, where a malicious user could just generate a bunch of dummy commitments and submit them for every archive request, and then archivers would not want to service those requests, because something's already spoken for them, and issues like that. So I think we need to think about it, but I definitely think we should have the proofs on chain, like you mentioned, and we should see if there's a way to solve this issue in a way that makes sense. We have a few more minutes left; was there some other topic you wanted to cover as part of this? Yes, I guess one other + +[49:00] question that we don't have a great solution for, and one that we need to figure out pretty soon because it's required for the v0 launch, is how to bump rent. Right now, whenever you create an entry, its rent balance is initialized to some amount, but there doesn't seem to be a great way to bump rent and actually increase that rent balance. Kind of the initial thought was: hey, whenever you access something, increase the rent, so that the things accessed most commonly automatically have their rent increased; if you access something a lot, it will most likely stay on the bucket list and you won't have to unarchive it. Now, for read-write items it's easy, because you have to rewrite the entry anyway, so you might as well bump the rent. However, it's not clear how to bump rent on read-only items. For instance, if you have an auth record that's almost never changed but is read often, + +[50:00] you would want to bump the rent on reads, so that you wouldn't have to constantly unarchive it. The issue is that, because of the way the bucket list is structured, there's no way to bump rent without rewriting the entry: entries and buckets are immutable, so in order to update an entry, it's not like SQL where you can just go to the entry and change a value; you have to completely rewrite the entry. And so we wouldn't want to bump rent on every read,
because then we're implicitly, at the systems level, turning reads into read-writes, which we don't want to do. So in the read-write case we obviously want to bump rent, but I was wondering if there are any ideas as to what to do for read-only data and how to handle rent in that regard. Well, one simple way I've been thinking about is: you + +[51:00] just expose some contract function that does read access to the entries you want to bump and nothing else, and you call it just like any other contract function to bump the rent. But that maybe can be generalized to host functions that allow you to bump arbitrary entries without accessing them, so that you don't need to maintain any invariants about only the contract modifying its own data. I guess we can all agree that bumping rent is always positive, right, so anyone should be able to do it. And then, a host function that takes a bunch of ledger keys and bumps their rent by whatever mechanism we come up with, + +[52:00] which again is not super pretty, but it makes it possible to do the bump without touching the contract code, increasing its size, and so on. So it's basically a generic way to maintain your contracts; I think it's maybe viable. Yeah, I think the host function could be good. I think the only issue with that is that key discovery is still a problem, and it might be difficult to determine which keys you need to bump rent for in the host function. But perhaps another thing we could do is expose an explicit rent bump at the storage interface level, and also make the current rent balance readable in the smart contract. So I could imagine a common paradigm for read-only data would be + +[53:00] something like: if you have an auth entry, whenever you read it, you also check whether the rent balance has fallen below some level, and if so, you call a rent bump on that value. And so essentially there's a cheap path and an expensive path: if your rent is above the threshold, preflight would put that entry in the read-only data set, but if your rent is below the threshold, preflight would put it in the read-write set and bump the rent. So I think that might be another possible solution for auth-like, read-only data. I mean, that's kind of possible, but it doesn't solve it for everything. Imagine again your token: it has an admin, and let's say you don't manage this token much, so you don't touch the admin entry frequently, and then it would still expire, because you don't call the admin + +[54:00] functions frequently enough. So for the important things you still need some sort of manual tracking, and you still need to understand which entries need to be updated; I'm not sure we can avoid this, just because some entries may be rarely accessed. Well, I think that's okay, actually, because if you don't access the admin entry very much, then whenever you do access it, you need to restore it; I think that's okay. The case I'm talking about is if you have some read-only value that you access a ton, which still gets archived all the time. Yeah, but my point is that you can still forget to call it.
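The threshold pattern sketched just above (keep reads on the cheap, read-only path unless the rent balance has dropped below a floor) might look like this in outline; every name here is a hypothetical stand-in, not a real preflight API:

```rust
/// Hypothetical view of an entry's rent state as seen during preflight.
struct EntryRent {
    balance: i64,
}

const RENT_FLOOR: i64 = 1_000;

/// Decide, at preflight time, whether this access can stay in the read-only
/// footprint (cheap) or must become a read-write access that also bumps rent
/// (expensive, because bucket-list entries are immutable and a bump means
/// rewriting the entry).
fn classify_access(entry: &EntryRent) -> &'static str {
    if entry.balance >= RENT_FLOOR {
        "read-only footprint: no rewrite, no bump"
    } else {
        "read-write footprint: rewrite entry and bump rent"
    }
}

fn main() {
    assert_eq!(
        classify_access(&EntryRent { balance: 5_000 }),
        "read-only footprint: no rewrite, no bump"
    );
    assert_eq!(
        classify_access(&EntryRent { balance: 10 }),
        "read-write footprint: rewrite entry and bump rent"
    );
}
```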
So relying on automatic bumps is not always going to work. And another thing, maybe it's not a super good example, but I've been thinking about who pays for the bump: it's always the source account who pays for the bumps, + +[55:00] and it would be a bit weird if, every once in a while, some transaction to your contract suddenly became more expensive because you need to bump the rent of some entries that have nothing to do with your account specifically. For example, let's say you have a contract that stores some state that is basically never going to change, so it's always read-only, and there is no clear owner: the token pair entries in a liquidity pool are not owned by any address or anything. What would happen is that, every once in a while, someone who trades with this liquidity pool would need to pay for the rent bump of the token pair record, + +[56:00] which is a little bit awkward, right? Because I just want to trade; why am I being charged more? And then there are all the incentives to try to game this and not submit transactions until someone else has bumped the rent. I mean, it's definitely viable on paper, but it just leads to weird situations where you're bumping rent on some entries you don't even know about. I guess it also depends on the amount, because if it's small enough it probably doesn't matter, but if it's high enough, it suddenly becomes pretty annoying for the user who ended up paying it. Yeah, thank you, I agree. We probably need to think about those couple of angles: the people that want to maintain their rent balance on those + +[57:00] read-only types of items, and when I say people here, it could be a contract that tries to do this itself, like an AMM that wants to ensure it keeps its own state alive; at the same time, there are probably scenarios where you want to do that from the outside in some way, because you don't want every contract to handle it; I mean, there are cases where this is not going to work, like if nobody is using it and you need to revive it, or, I don't know. Anyway, we are at time, so thank you, everybody; let's continue those conversations. Actually, we should probably create explicit threads on the dev channel about those topics, and then we'll keep going. Thank you again. + +
diff --git a/meetings/2023-03-09.mdx b/meetings/2023-03-09.mdx new file mode 100644 index 0000000000..1a6b86ad66 --- /dev/null +++ b/meetings/2023-03-09.mdx @@ -0,0 +1,143 @@ +--- +title: "Fees CAP Update and Prioritization" +description: "Overview of proposed updates to the Fees CAP, focusing on deterministic resource pricing and a separate inclusion fee model to improve transaction prioritization, predictability, and user understanding in Soroban." +authors: + - eric-saunders + - graydon-hoare + - justin-rice + - nicolas-barry + - siddharth-suresh + - tomer-weller +tags: [soroban] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This meeting walked through recent revisions to the Fees CAP, focusing on how to price Soroban’s various resources (compute, bandwidth, ledger I/O, storage impacts) without forcing users and clients to reason about a complex, multi-dimensional bidding system. It outlines a direction for unifying Soroban authorization by replacing `invoker`, `soroban-auth`, and ad hoc schemes with a single account-abstraction model and standardized authorization payloads so contracts stay focused on business logic. + +A central theme was separating deterministic, protocol-derived resource charges from a distinct inclusion mechanism that handles congestion and ordering. The goal is a model that’s easier to understand, more predictable for users, and less prone to unfair prioritization when workloads mix different resource profiles. + +### Key Topics + +- Motivation: Soroban resource accounting is inherently multi-dimensional (compute, bandwidth, reads/writes, bytes, events), and fee design needs to deter spam while keeping execution affordable. +- Dynamic per-resource pricing + prioritization was seen as too complex for both the protocol and clients to bid correctly. +- “Synthetic resource” (gas-like aggregation) exploration highlighted prioritization pathologies: + - weighting different resources can unfairly penalize mixed workloads + - small usage of a scarce dimension can dominate total price even when other capacity is plentiful +- Updated direction: deterministic resource fees derived from network parameters, so costs can be computed from a transaction largely independent of other transactions. +- Transaction ordering under contention shifts to a separate **inclusion fee**: + - prioritization reflects user urgency / willingness to pay (the “social value” of a transaction) + - decouples priority from internal resource dimensions +- User experience emphasis: users should be able to understand “what I pay to run” vs “what I pay to get in sooner,” similar in spirit to familiar fee markets but with clearer separation. 
+- Refund policy discussion: + - conservative stance against refunds for highly contended resources to prevent gaming and idle capacity + - framing: you pay for reserved/declared capacity (what you _asked_ for), not necessarily what you _ended up using_ +- Edge cases called out for analysis/simulation: + - preflight overestimation vs runtime path changes + - “unused writes” / test-and-set patterns where a write is declared but doesn’t occur + - arbitrage-style scenarios where state changes invalidate a preflighted execution path + - whether limited refunds make sense when there is no contention + +### Outcomes + +- By generalizing authorization through account abstraction, contracts can treat classic accounts and programmable wallets uniformly, delegating signature verification, nonce handling, and policy enforcement to account contracts that expose a single `check_auth` entry point. +- Contract invocations are authorized via structured payloads that capture the full call stack. Preflight recording determines what will execute so wallets can construct correct payloads, while the host tracks nonces per account-root pair. +- The proposal includes a built-in account contract for classic accounts, recommends temporary or scoped storage (such as short-lived approvals) instead of long-lived allowances, and presents proof-of-concept patterns like timelocks, atomic swaps, and token approvals, along with notes on SDK and host implications. +- This approach introduces added complexity in preflight, larger transaction footprints, and new host responsibilities. The intent is to reduce ecosystem fragmentation, simplify wallet integration, and focus authorization logic within a consistent framework. + +### Resources + +- [Fees CAP – Updated Proposal and Design Discussion](https://docs.google.com/document/d/1J-J3ClTUkrsLiJag906OH4hmNkZI3Jk6_Y9ZYt_psAI) + +
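As a rough numeric illustration of the split described above (a deterministic resource fee derived from validator-voted network parameters, plus a separate inclusion-fee bid), consider the following sketch. All parameter names and magnitudes are invented for illustration; they are not the values proposed in the CAP.

```rust
// Illustrative only: the shape of the computation (deterministic resource
// fee + separate inclusion bid) reflects the discussion; the names and
// numbers do not come from the CAP.
struct NetworkParams {
    fee_per_instruction: i64,  // stroops per compute unit
    fee_per_read_entry: i64,   // stroops per ledger entry read
    fee_per_write_entry: i64,  // stroops per ledger entry written
    fee_per_byte_written: i64, // stroops per byte added to the ledger
}

struct TxResources {
    instructions: i64,
    read_entries: i64,
    write_entries: i64,
    bytes_written: i64,
}

/// Deterministic part: computable from the transaction and the voted
/// network parameters alone, independently of other transactions.
fn resource_fee(p: &NetworkParams, r: &TxResources) -> i64 {
    r.instructions * p.fee_per_instruction
        + r.read_entries * p.fee_per_read_entry
        + r.write_entries * p.fee_per_write_entry
        + r.bytes_written * p.fee_per_byte_written
}

/// Total fee: the fixed resource charge plus an inclusion bid expressing
/// urgency (the "social value"), the only dimension used for ordering
/// under congestion.
fn total_fee(p: &NetworkParams, r: &TxResources, inclusion_bid: i64) -> i64 {
    resource_fee(p, r) + inclusion_bid
}

fn main() {
    let p = NetworkParams {
        fee_per_instruction: 25,
        fee_per_read_entry: 1_000,
        fee_per_write_entry: 3_000,
        fee_per_byte_written: 10,
    };
    let r = TxResources { instructions: 400, read_entries: 3, write_entries: 1, bytes_written: 200 };
    println!("resource fee: {}", resource_fee(&p, &r)); // fixed by the network
    println!("total fee:    {}", total_fee(&p, &r, 500)); // only the bid competes
}
```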
+ Video Transcript

[00:00] Okay, I'll just kick this off. Hey everyone, this is our weekly protocol meeting, in which we discuss and try to make design decisions. I think we have only one thing on the agenda today, which is an update on the fees CAP. Nico, can you take it? Yeah, thank you. We had a few conversations in the past few weeks, and some more recent updates in the last week based on some of the conversations we had on Discord. So we have a round of updates that I actually published yesterday to the CAP, but this is an incremental change on top of the previous version. So maybe what I can do is give a high-

[01:00] level overview of where we are with the updated fee model CAP, and then people who are interested can go read the actual CAP, which has a lot more detail on each of those topics. I don't know if that makes sense, but let's do that. So there are multiple things that we're trying to do in this CAP, but one of them is basically to make all the changes that are required to deal with Soroban's needs. Coming from classic, the classic protocol where we have this flat fee per operation, and the operation is kind of the unit of a transaction: you can have

[02:00] between one and a hundred, and we have a really simple model for that in classic. In Soroban the resources are a lot more complicated. You have compute, you have the amount of bandwidth that you're using, the I/O (how many reads and writes you're doing to the ledger, and how many bytes), and then you have even more complicated things, like, depending on the contract, events that potentially translate into real costs for downstream systems. So when we look at all of this, we have to think about how we can avoid, in

[03:00] particular, the spam type of traffic on the network, while at the same time keeping fees as cheap as possible. Trying to balance those things is what this CAP is about. When we left off the last time we talked about this CAP, one thing we discussed was that we were going to try to make ledger space something that is priced dynamically, and in particular we were looking at ways to limit the size of the ledger. These are the topics we've been having around how we save data off-chain, which Garand

[04:00] presented, I think, a couple of weeks ago or even last week. So that's one component of this CAP. The other thing we were looking at was the pricing model for all the other resources, so compute and bandwidth, for example. We've been going back and forth: the first big iteration was to try to come up with a way to do dynamic pricing for every resource type on the network, and as we looked into it, it became evident that it was

[05:00] getting quite complicated. Modeling it on the network itself is actually not so bad; where things were getting really complicated is when we tried to think about ways to prioritize traffic:
when you have multiple transactions competing with each other, which one gets to be in the ledger, and which ones get postponed to a future ledger? That alone is getting complicated. And then there's the other aspect: while this is happening on-chain, for clients it's also getting really complicated to decide what to bid, in terms of fees, for each of those resource types. So those are the kinds of things we've been thinking about in more depth in the past few weeks,

[06:00] trying to simplify this a bit. At first we were looking at something (you can still see it in the CAP currently as part of an appendix-type section, an alternate implementation) which was to come up with a synthetic resource that is basically an aggregation of the different resource types that a specific transaction is using. It combines them into something that is maybe similar to the notion of gas that you find, for example, in Ethereum, where that

[07:00] synthetic thing represents compute, but also disk I/O, bandwidth, any resource in the system really; you try to fit all of that into this synthetic thing. So we started to explore that, and what we found is that it was still fairly complicated to be fair when prioritizing transactions. There are scenarios where you come up with a recipe for this synthetic resource: for example, you say, okay, to combine those different resource types, I'm going to take the compute numbers I'm getting, give them a weight, and add to that,

[08:00] with a different weight, the I/O cost, and you combine all of those into a single number that ends up being your amount of this aggregate resource type. And when you do that, you end up with very strange prioritization problems, because if you try to balance, for example, how to compare a transaction that does a lot of computation against another transaction that does almost no computation but is very heavy on I/O, you end up with these weird trade-offs where you have to really penalize one of those

[09:00] two types of transactions, and it gets even more complicated when you have a transaction that does a little bit of both. You end up with people overpaying a lot, getting penalized a lot, for using a little bit of a resource that happens to be in demand, while also using an expensive but available resource. That's the type of problem we were running into, and it would actually happen quite often. Imagine ledgers being filled where the constraint is, let's say, computation: for whatever reason there's a lot of traffic from people doing, say, trades against

[10:00] an AMM. That's a little bit of I/O and then computation (depending on the AMM implementation, it could be something fairly complicated), and let's say there's a lot of trading happening. You still have plenty of I/O available on the network,
but I/O is always going to be expensive in the way we want to model this, because adding things to the ledger is one of those things that has an impact on many systems. So relative to the compute price, adding data to the ledger is orders of magnitude more expensive, kind of like what you see in

[11:00] the classic protocol, where the base reserve is a lot more expensive than just the transaction fee; that's the way to think about this. So if you then tell people, okay, in order to get into the ledger you have to pay, say, a hundred times the price of your aggregate resource because of congestion on gas (sorry, on compute), asking people to pay a hundred times for I/O in this case is kind of weird. It makes things very expensive for no good reason. So in this latest iteration, what we looked at was how we could make this problem go away, and the approach we have currently landed on is to rely more on

[12:00] pricing resources via network parameters, an automatic pricing of resources similar to what was already there for ledger space, where, if you look at a transaction, you can derive the price of those resources independently of other transactions. The market dynamics you have are then not on the resources themselves but on what I called the cost of inclusion in a ledger. This basically shifts the conversation to:

[13:00] if somebody else submits a transaction, why do you want to be ahead of it, and why does that person want to be ahead? I think the name used in the literature for that is the social value of the transaction. The social value has nothing to do with the amount of resources a specific transaction is using; it's really based on whatever people are actually doing with that transaction. In some cases, even though your transaction might be doing something simple, like setting one bit on the ledger, it may actually be something that unlocks huge value down the line. So this decoupling of the two is kind of interesting.

[14:00] For this to work in a way that makes sense, the assumption we're making is that we (and when I say we here, it's basically the protocol and the validators voting for some of those network parameters) will be able to price things like how much it costs to store, let's say, one kilobyte of data on the ledger; that's the type of pricing we're talking about. The price of the transaction then basically decomposes into those network-derived fees (like I said, the cost of storing data on the ledger, for example) plus this inclusion fee, which

[15:00] is an actual bid per transaction, and that's the total fee you end up paying. So the focus right now is to see how far we can take this idea, where the inclusion fee is unrelated to actual resource utilization. That's what we're looking into right now: doing modeling and analysis to see where this breaks and whether
it basically holds as a viable approach to this problem. Questions on this at this point? Yeah, the one other note I want to make here is that, besides just prioritizing

[16:00] transactions, just from a user experience perspective, I think it should be a requirement that the user can understand what's going on here. With all of its faults, in the Ethereum ecosystem, at the end of the day there's just the gas resource and you're competing for gas. So the question is: with these new concepts that you're introducing, what is the user experience? Actually, this is very similar to what you see there; it's a variation. The experience there is that for a given transaction you have two components: you have the gas price that you've attached to the transaction, and you also have (I think they call it a tip or something) a payment to the validator

[17:00] that allows you to get included. So there is something similar to this inclusion fee. The difference is that in Ethereum the inclusion fee is tied to the amount of resources; it's a rate, basically, not a flat fee, so it's trying to do more of a dynamic price signal on the aggregate resource, as opposed to stepping outside of that and assuming that the base price you have for resources is relatively accurate, with this prioritization mechanism modeled as adjusting fees based on

[18:00] the use cases rather than on whichever limit you're hitting in the system. Got it, okay. Let's open this up to questions. If anyone wants to ask a question, please do so in the chat, or ask to be included on the stage. Nico, just one more question for you: what are the open questions? Is there anything you want to consult the team on right now, in terms of questions you want help answering? I think there is one aspect in the CAP around

[19:00] which resources we want to allow refunds for; that's kind of an interesting question. Right now in the proposal, I think you can only get refunds for anything that goes to downstream systems, basically what gets into Horizon or an RPC; so essentially the cost of the meta, outside of the ledger state changes. The reason for not giving out refunds for other resources is that validators make certain decisions: they are not going to include certain transactions because

[20:00] we're hitting some limit, let's say on bandwidth. Actually, bandwidth is not a good example, because we know exactly the size of the transaction. But take compute: let's say a transaction declares, I'm going to use 10,000 units of compute. Because it declared 10,000 units, we don't include up to 10,000 units of compute from other transactions that have maybe the same priority. And then later, that transaction decides, well, actually, I'm only going to use a thousand. By using less, it would give an incentive for

[21:00] people to just put maximum numbers on everything,
and then the ledger would be kind of idle, having processed this one entry. So I think that's the reason we don't want to do refunds on the resources that we think will be highly contended. But I wanted to hear if there are any opinions on that topic. And then the other thing is whether everyone likes the modeling of this prioritization based on the social value of transactions. If people have opinions on this, that would be great, because like I said, this is one of the things that is new in this proposal.

[22:00] Can you offer a bit more information on the social value? I know you mentioned it before, but it's a relatively new concept. It's not new in the crypto space. It's the idea that the importance of a transaction is actually completely independent of the amount of resources it's using. A simple payment could be moving billions of dollars; when you compare that to a large transaction that does a bunch of stuff, you don't know: is it doing something that's not super useful, or is it a transaction related to a payment channel

[23:00] or a layer-two type of transaction that is extremely important? The inherent value of the transaction is actually off-chain. Anyway, how does the user determine the social value of a transaction they're about to submit? Well, it's the same idea as when you say, okay, I'm going to bid 2x or whatever the base fee is today on the classic network. At some point you say, I'm willing to pay up to X for this transaction. It's the same idea; it's just that here we make it completely independent of resource utilization,

[24:00] and that's the one difference. Otherwise, in terms of user experience, you want to get people to think about how much they're willing to pay for a specific transaction, and not in terms relative to its processing cost, because that's not the way people think about it. When you're paying at a store, you don't reason about the cost of processing that transaction on that particular network; thinking in those terms is kind of hard.

[25:00] In traditional systems it's going to be a fee as a percentage of your amount. I guess that's a way to think about this: it's tied to the real-world value of the transaction, and it has nothing to do with the cost of actually processing the transaction on, say, the Visa network. The flat fee that you can imagine exists there is not visible to end users in those systems, and it shouldn't be, because it has nothing to do with the value of that transaction. I wanted to say that the problems we're talking about here are actually not specific to Stellar or blockchain or anything like that; these are standard problems in multi-variable optimization and scheduling.
This is a thing I spent a bunch of my life working on,

[26:00] so you get the usual problems. You have to decide whether there is a deterministic relationship between the things that you're paying for and the things that are available; in our case that's compute or whatever. But then there's this other aspect, which is how much somebody needs the thing to be done, and that piece is usually not modeled within the framework of the scheduling, because it involves a bunch of pieces that are too complicated to be encapsulated, and they have nothing to do with the cost of running anything. To give you a concrete example: in astronomy, you schedule telescopes. You want to put observations on these telescopes, and it costs money to do that, so there's a bunch of finite stuff that you have to pay for. But then there's the question: this particular observation I want to make today, how important is it to my science? And that question can't be encapsulated

[27:00] in a general model; it's very specific to my actual use case. The analog here is: I have a smart contract, and it's very important for my contract to execute, so I'm willing to pay more for it. So it's very normal to have this as a separate layer. But all the problems that we're going to run into are the same problems that are covered in every scheduling system: this question of determinism, how much it costs, how you trade off between orthogonal variables, how you provide this in a transparent way, and then how you deal with gaming of this social aspect (if I have enough money, can I dominate the whole system and basically distort the parameters?). These are hard problems, but they're also standard problems; I guess that's what I'm trying to say. Exactly, and that's actually why we ended up where we are now: we didn't want

[28:00] to come up with that aggregate resource, basically deciding what weights to assign to each of those resource types to turn this multi-dimensional optimization problem into a single variable. It seemed like the most practical way was to really decouple it, kind of like your example: you have resources that cost real money (in our case, executing a transaction should cost a reasonable amount, so that you don't have junk transactions being executed on the network), and after that, the second layer is more

[29:00] of a market for getting into the queue. A good analogy would be if you could buy your spot in line on the day of one of those Black Friday sales: you're outside the store and you pay to get into the line, but once you're in the store, it's still the store's prices. You don't pay more because there are more people in line or inside the store. That's how we decoupled the two problems. Yeah. So there are a few fundamental questions that I guess it would be great to get answered.
The first question is: these things that we believe are deterministic, compute for example, is it

[30:00] deterministic enough that we could actually define a cost for it that would not vary? Yeah, I think so. The current idea is to give it a flat rate; the cost for compute would be a linear function of the amount of compute you're using, and we'll tune that over time. Over time we can maybe come up with more complicated models for the system to follow, but right now we're using linear functions,

[31:00] and the assumption is that we should be able, on a regular basis, to have a vote between validators to decide, okay, should we adjust that function because of the patterns we've seen in, say, the last month. That's what I was getting to, which is that if you have a multi-variable optimization, what happens is that it's tuned to the environment you observe. The simplest way is that you have a bunch of variables you're sticking together with plus signs; you put coefficients in front of them, and those coefficients are tunable knobs you use to decide how to trade off the different orthogonal dimensions. That can be tuned to a particular workload, but when the workload changes, it usually doesn't work

[32:00] anymore and you have to retune it. So a key question is: as the kinds of things that happen on Stellar evolve, how will we adjust and adapt? But that's the thing I'm saying: we are not going to prioritize transactions based on this combination of things. Instead, we're prioritizing based on the perceived social value of those transactions. I think that's the key element of this proposal: we don't try to be clever about fitting everything into a linear combination of dimensions, because if you do it like that, you

[33:00] end up in a situation where, if you have, say, a bimodal distribution, two types of transactions on the network, you try to put a line between them and no line is going to work. You basically have to decide, okay, do I prefer the first group of transactions or the second one, and when you're outside of that, your fees are just completely broken, or your prioritization is completely broken. I completely agree with that. The other aspect of this is the refund thing that you talked about. I think another way to think about this, to use your analogy of waiting to get into the Black Friday sale, is that you're paying for the privilege of

[34:00] having the spot in the line or the queue; you're not paying for what you're actually going to be able to buy when you get into the store. So if I pay for 10,000 compute units, because that gives me the space, and I actually only use a thousand: well, I paid for 10,000, because that's how I justified my position in the line. So I think it is consistent to not offer refunds,
but maybe you can frame it differently: really, what you're paying for is not the usage at runtime, it's your expected usage. Exactly, yeah. Any more questions from the audience? So, Nico, I understand there's

[35:00] probably another draft of this coming. There's a pull request that's open (Dima is the other person) that we didn't have time to merge yet, but yeah, there should be. I'm hoping to get more feedback from people. It would be nice if we could get some help playing with the proposal in terms of simulations: to see what happens, whether there are situations where it would behave in ways that are really not great, like the example I used where you start to combine things and certain transactions end up being penalized. My hope is that this proposal doesn't do that,

[36:00] but I would be very curious to see if there are things we missed, and there are probably situations where it doesn't work; having a better understanding of those situations would be super useful. Got it. And are there open questions, or is the proposal complete as it stands? It's mostly there, I would say. The open questions are more the minutiae of transaction sets. For example, going back to the refundable stuff: maybe we want the validators to actually be able to give a hint that, if the transaction set was put together and there was no contention on

[37:00] anything, then maybe we should be able to give refunds; the queues were basically empty, so there's no reason to penalize anything. But I don't know if that will actually happen in practice, because we tend to see queues that have a bunch of stuff and never enough capacity. At the same time, like I was saying earlier, if the contention is all on compute and not on I/O, then people may not use all their I/O. One of the problems I'm talking about here: imagine you have a test-and-set type of contract, a contract that does a read and then, under some conditions, updates the ledger entry. In this proposal, regardless

[38:00] of what the contract ends up doing, if you said you were going to write and you don't write, you've still paid for the write, which is a bit sad. I don't know how often it will happen, but it's something I think may happen quite a bit, maybe in certain types of trading contracts. For AMMs I don't think it will happen, because you always have those tiny adjustments to the reserves, but maybe in other kinds. Like I said, it's a test-and-set type of situation, so I would imagine it should be fairly common in the general case. So this is specific to situations in which, let's say, preflight

[39:00] determined that you're going to go down one code path, and then the state changes, and when you actually go on the network, it takes a different code path. Yeah, and this situation comes up when you have an arbitrage opportunity: people see, oh, I can trade, and so,
when they preflight, they see that, yeah, they can trade, and on that basis they submit the transaction. But then, when their transaction gets executed, the trade was already taken, so it's a no-op. That's what I'm thinking of with the AMM, because that's exactly what will happen: you don't update the reserves in that case, because the swap is just not going to do anything. But you said you were going to swap, so now you're paying for that.

[40:00] So in these kinds of cases, one useful lens is: you have all these things to do and you can't do them all; that's why you have prioritization. So really, the question for your algorithm is who loses, who is not going to get in. Answering that question, deciding what you would like to happen in those situations, helps inform the structure of your optimization. It could be that what we're aiming for is optimal use of compute and I/O, plus the ability for people to really push for something if it seems important to them. If that's your thesis, the lens you're using, then you can make a decision about whether it makes any sense to charge someone when the write doesn't actually happen, because maybe it achieves your goals. I think that's one way to think about how to move forward on this. Yeah, and at the same time, I think the current CAP is the

[41:00] conservative version. Similarly, if a contract traps, right now we basically say you don't get any refunds, because you've been wasting the slot: you had promised it was going to be a super useful transaction, and then it didn't do anything because it crashed. It goes back to what you were saying: we want to encourage people not to submit transactions that crash or do nothing. So maybe the current proposal is the right one. Okay, were there any more questions in the chat, or something we should be looking at?

[42:00] I don't see any more questions in the chat, so unless there's something else you want to bring up today, Nico, I think we can call it a day. No, I think that's it. We'll continue conversing on the live stage or the live chat if anyone wants to keep talking; that's on the regular dev channel, so people can use that too. All right, thank you, have a great day.
diff --git a/meetings/2023-03-16.mdx b/meetings/2023-03-16.mdx new file mode 100644 index 0000000000..9bc99c8403 --- /dev/null +++ b/meetings/2023-03-16.mdx @@ -0,0 +1,192 @@ +--- +title: "State Expiration Roadmap" +description: "This session outlines Soroban’s state expiration rollout plan: shipping rent + temporary storage at pubnet launch, finalizing SDK interfaces for unique/recreatable storage, and enabling expiration later once the backend is ready." +authors: + - dmytro-kozhevin + - garand-tyson + - graydon-hoare + - justin-rice + - nicolas-barry + - siddharth-suresh + - tomer-weller + - tsachi-herman +tags: [soroban, CAP-46] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Soroban is a smart contracts platform designed with purpose and built to perform. + +This discussion focuses on the roadmap and implementation sequencing for state expiration: what must be ready at Soroban pubnet launch versus what can be completed and activated afterward. A key theme is prioritizing a stable developer-facing interface early, even if the full storage backend and expiration mechanism is enabled later. + +The group also digs into how rent should work in practice—especially who pays it, how contracts decide when to top up, and how fee variability interacts with developer expectations around predictable lifetimes and deletion behavior. + +### Key Topics + +- Rollout plan split into two stages + - At pubnet launch: implement rent + temporary storage fully, and finalize/expose SDK interfaces for unique and recreatable storage + - After launch: enable state expiration (actual expiring/archiving) once the storage backend is ready + - Rationale: lock the interface early so contracts deployed from day one can be compatible when expiration is later activated +- Terminology update + - Naming pivots from “archival system” to **state expiration** to avoid confusion with history archives +- Rent model fundamentals + - Each Soroban ledger entry carries a rent balance (XLM tied to that entry) + - Rent is charged over time for remaining “live” in ledger storage + - Rent fee rate is designed to vary with storage pressure (bucket list size), aiming for a system equilibrium that discourages unbounded state growth +- Who pays rent is contract-dependent (no universal default) + - Token-like balances: the user “owns” their balance entry, so user-funded rent feels appropriate + - Shared AMM state (e.g., pools/pairs): a shared resource used by many, so it may be better funded by a shared mechanism rather than whichever user happens to touch it next +- Proposed rent payment primitives for contracts + - Expose two primitives: `rent bump` and `rent bump from` (modeled after `transfer` / `transfer from`) + - Enables explicit control over who funds rent + - Allows allowance-style patterns so a contract (or maintainer) can fund rent on someone’s behalf without requiring that party to sign each time +- Expected developer pattern for keeping entries alive + - Contracts likely add checks inside common getter/setter helpers: + - inspect current rent balance + - if below a threshold, bump rent (and choose the payer logic) + - This keeps policy flexible while making frequent-use entries self-sustaining +- Abuse/spam concerns and mitigation thinking + - Concern: shared pools could be drained by junk entries + - Suggested approach: bump rent only on meaningful access; junk entries that aren’t used don’t get topped up + - Also noted: contracts can add economic friction (e.g., charge to create entries) +- “Don’t store 
the data, only store proofs” idea + - Not treated as a protocol-special case + - Suggested approach: store hashes/checksums on-chain and keep sensitive payloads off-chain, using the contract to verify integrity via cryptographic hashes +- Predictability vs variable pricing + - Developers want to reason about “how long does this last” in ledger time, but fees vary with storage pressure + - Temporary entries were discussed as needing strict, ledger-count TTL semantics (especially for security-sensitive uses) + - Unique/recreatable entries are less sensitive since they can be restored, but UX predictability still matters +- Bucket-merge granularity realities + - Expiration/cleanup aligns with bucket merge events, which implies coarse-grained practical effects (e.g., week/month-ish behavior for long-lived entries) + - Discussion notes conservative charging/expiration behavior can lead to entries living longer than their nominal target in some scenarios +- Open design tension: time-based guarantees vs dynamic rent rate + - Proposal floated: lock an entry’s rate for a period after a bump (a “subscription-like” behavior) to improve predictability + - Tradeoff concerns: incentives for gaming/“rent control,” fairness between older and newer entries, and potential impacts as the system approaches storage bounds + - Outcome: recognized as an open question to resolve in follow-on design work + +### Resources + +- [CAP-0046: Soroban smart contract system overview](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046.md) + +
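As a rough illustration of the getter-with-rent-check pattern described in the Key Topics above, the sketch below models the AMM case, where rent is topped up from a shared fee pool on access. The rent API (`rent_balance`, `rent_bump_from`), thresholds, and key encoding are all assumptions for illustration; the real SDK interface was still being finalized at the time of this discussion.

```rust
// Illustrative only: `Storage` stands in for whatever rent primitives the
// Soroban SDK ends up exposing; none of these names are final.
trait Storage {
    fn rent_balance(&self, key: &str) -> i64; // entry's current rent balance
    fn rent_bump_from(&mut self, key: &str, payer: &str, amount: i64); // pay from `payer`'s funds/allowance
    fn get_i64(&self, key: &str) -> i64; // read the entry's value
}

const MIN_RENT: i64 = 100; // illustrative threshold
const TOP_UP: i64 = 1_000; // illustrative bump amount

/// Getter for a shared AMM entry: every access checks the rent balance and,
/// when it runs low, tops it up from a fee pool funded by all users, so no
/// single unlucky caller bears the whole cost. A token contract would
/// instead pass the invoking user as the payer.
fn get_pair_reserves(storage: &mut impl Storage, pair_key: &str) -> i64 {
    if storage.rent_balance(pair_key) < MIN_RENT {
        storage.rent_bump_from(pair_key, "fee_pool", TOP_UP);
    }
    storage.get_i64(pair_key)
}
```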
+ Video Transcript

[00:00] Welcome to this week's Stellar, well, actually Soroban design discussion. This is where we have public discussions about key decisions on the evolution and development of Soroban. As anyone who is here probably knows, Soroban is a new smart contracts platform that is designed to work well with Stellar. We are deep into its evolution: it's been on futurenet since October, there have been, I believe, seven preview releases of Soroban, and we are continuing to iterate, with a focus on honing in on important changes and decisions that need to be made and implementing them as we move toward the completion of production-ready code. Today we will be continuing to talk about some of the key decisions that still need to be made, and I feel like I have now stalled long enough to let people join. So welcome. We have a little bit of a loose agenda today, but I believe that Garand

[01:00] is going to start by introducing the topic. Is that true, Garand? Yeah, can everyone hear me all right? Yes. Cool. So for the last couple of weeks we've been talking about the high-level archive interface. This is the interface where, with Soroban data types, we charge a fee for staying live on the ledger, and if an entry does not pay the fee, it is sent to an archive, from which the entry can be restored for some fee. We've talked about the high level, so today I'd like to talk a little bit about our implementation timeline for this project, and then do a deep dive into some of the details of the first step in the implementation timeline. First off: for the last couple of weeks we've been calling this the archival system, and we are officially pivoting, naming-wise, to state expiration, just to remove any confusion with history archives or anything like that,

[02:00] so I'll be calling it expiration today, but it's still the same system we've been talking about. So first I'd like to talk about the timeline. There are a lot of moving parts here and a lot of complexity, especially with the structure of the storage itself, potentially using something like a Merkle tree or some new data types that aren't fleshed out super well yet. So we are staging this in two parts: a part of the project that needs to be done when we launch Soroban pubnet, and a second part that we can finish later on, after launch. At launch, there are two big components that we want to have ready and done. The first is that we want to implement the rent system and temporary storage completely, which means that from day one Soroban entries will need to pay rent and will have rent charged from them. And second, we want to have the unique storage and recreatable storage interface finalized and exposed in the Soroban SDK, but we won't actually turn on state

[03:00] expiration yet. What this means is that on day one you can expect to have all the tools you will need to deal with state expiration, even though entries will not yet expire and be sent to deep storage, and they won't actually need to be restored. Our thinking here is that actually implementing the storage backend is a very significant problem that will take some time,
but as long as the interface is set and finalized at launch, even though we haven't turned on state expiration yet, any smart contracts deployed on day one will have the proper interface and the proper tools to deal with state expiration when we do eventually turn it on. With that in mind, today I'd like to talk about some of the details of the rent implementation and the temporary storage implementation; that's the first step, implementation-wise, in getting state expiration into Soroban. First, I'd like to talk a little bit about rent, some of the open questions we've had there, and some of our

[04:00] findings. We've discussed rent briefly before: essentially, every Soroban data entry will have a rent balance field attached to it, and this rent balance field contains some XLM that is tied to that specific data entry. Periodically, the rent fee is deducted from this rent balance, where the rent fee is the fee for being live on the bucket list for a given ledger. The rent fee is variable based on the size of the bucket list, in a similar way to the bucket list write fee in the fee CAP. The thinking here is that we want the rent fee to increase as the size of the bucket list increases. The reasoning is that as the bucket list gets larger, the fee for adding to the bucket list also increases, and we don't want the bucket list to grow unboundedly. By also having the rent fee increase at the same rate, you can get an

[05:00] equilibrium: as it gets more expensive to add to the bucket list and the bucket list gets bigger, the fee increases, so it's more likely that entries will be removed and deleted from the bucket list as they run out of rent and expire. So we have an equilibrium set up with this variable fee rate. Now, in addition to the variable fee rate, one of the questions we haven't really addressed yet is how we actually pay the rent on these entries. Initially, we wanted this process to be as seamless as possible, so we wanted a way to automatically bump rent whenever you access items, the thinking being that developers shouldn't have to worry about manually paying rent for entries that are used very often: if an entry is often used, it should just automatically get a small rent bump, so it's always live and never gets sent to the archive. However, the issue is that there didn't seem to be a good default behavior for paying rent, primarily because different types of contracts

[06:00] might have to pay rent differently. I think the big question here is: who is responsible for paying rent? Is it the user who's invoking the contract, or is it the contract maintainer itself? In our case studies, we found two separate use cases that illustrate this well. One example is a token contract such as USDC, where users have an individual token balance that they own. Even though Circle or some third party maintains the contract, the token balances are tied to users, so it seems to make the most sense that the user pays to maintain that token balance. In the token contract example, you would want the invoker of the contract to pay the rent fees.
However, in the other example, you have something like an AMM. Say the AMM has a liquidity pool for an asset pair. If you want to access this asset pair, it's a public,

[07:00] shared resource of the AMM itself, so it doesn't make sense for the individual user who happens to be accessing the asset pair to pay the rent on an entry that is used by many different users of the AMM. So we have these two separate cases: in the token contract, an entry individually owned by one user; in the AMM case, a shared resource. We wanted to see how rent payment could fit into that paradigm. There didn't seem to be a clear way to bump rent by default, because there's no good default behavior for who pays; it's very dependent on the contract and the data type you're dealing with. So instead of offering a mechanism that bumps rent by default, what we're doing is exposing two primitives that smart contract developers can use to define how rent is paid for their entries. These two primitives, which we're calling rent bump and

[08:00] rent bump from, are modeled after the token transfer contract. In the native asset contract you have two functions, transfer and transfer from, which is how you transfer tokens. Rent bump from works in a similar way, insofar as you can define who the from address is (who is paying the rent), and with rent bump from you can also pay out of an allowance. This allows smart contracts to define who can pay. For instance, in the AMM example, it might make sense that for every transaction in the AMM, a small fee is collected and put into a central shared pool, and from this central shared pool the AMM pays out rent to the shared entries. This way, the cost of keeping those entries alive is spread out and amortized across the entire user base, and

[09:00] you don't have one particular user who gets unlucky and has to pay to bump an entry's rent just because they happened to access it when it was low on rent; instead, you have this centralized pool that the contract can pay from. In the token example, instead of a centralized pool, the user itself would bump rent. But we want to give flexibility, so it might make sense for certain implementations to still have the user fund rent while, for usability purposes, the contract pays on the user's behalf. Because this interface is similar to the token interface, allowances can also be used for paying rent. For instance, say that Circle, for whatever reason, wanted to maintain user accounts on the users' behalf but didn't want to pay for it: users could give the contract an allowance, and from that allowance the contract could call rent bump from and bump the rent on the user's behalf, even without the user themselves

[10:00] signing or invoking that operation. With these two primitives, we think we have pretty good coverage of the use cases, of who pays and how you pay, allowing for both individually owned entry types and shared resources.
So I guess, on the rent and rent payment front, are there any questions or anything that we want to talk about? Garand, it looks like there is a question from moots in the chat, which is: does this mean contracts are responsible for exposing and defining logic to pay/bump rent for contract data entries? Yeah, the answer to that is yes. Again, we were trying to find a way to do this under the hood, without developers explicitly defining it in the contract, but the issue is that there is no good default behavior, because of the different use cases between something like a token contract and an AMM. But looking at the design paradigms we're seeing in token

[11:00] contracts now: whenever it comes to state, a lot of contracts, including the example contracts we have in the Soroban examples repo, have helper functions for getting and setting values. I think what makes the most sense from a developer perspective is just adding an if statement to these getters and setters, which most contracts already define. Say you have a get-token-balance function: what would probably make sense is to have an if check there that says, if the rent balance of this entry is below, say, a hundred lumens, or whatever value you want, then bump it by a thousand. That's how we're imagining this would work from a contract developer's perspective: you'd have these getter and setter functions with this if-statement check, and this also lets the contract define who pays and how within that if statement. Another important part of this is that the current rent balance of the entry is exposed within Soroban, so the smart contract itself can check the current

[12:00] rent balance and decide whether to bump the rent based on that balance. Oh, I have a question. I'm really concerned about the pool solutions, just because it seems too easy to abuse them by creating a lot of junk entries that the contract would bump unconditionally. Okay, I think this would have to be an implementation detail of the contract itself, but I think a smart, or wise, development strategy for something like an AMM would be to have this fee pool but not pay rent for all entries equally. For instance, one possible implementation would be that you have a fee pool and you bump entries on access, and this is defined by the AMM contract itself. So if you have a junk entry

[13:00] that's never actually used in the wild, its rent would not be bumped from the pool, because, at least as I'm imagining a solution, whenever a user accesses something like an asset pair, you check the current rent balance of the asset pair and then, based on that balance, decide to bump or not. In your example, if someone creates a spam entry to game the system or drain the pool, first off, I would assume the AMM would probably want to charge some price to create an entry, to deter spam like that; but also, if the spam entry is never accessed, it would never enter that if check, so its rent would never be eligible for bumping, if that makes sense. Cool. So I guess, Garand, I have two questions. The first one is about state.
So, my understanding, and please correct me

[14:00] if I'm wrong, is that at some point we want to get rid of some of the state, basically send it to an archive, be able to effectively delete it from the ledger, and, if needed, restore it later. Am I correct so far? Yes. Okay, and I also assume that when we want to restore it, we will have some security guarantees, cryptographic guarantees of some sort, like the Merkle tree you mentioned or something of that sort. Yeah, correct. Okay, that's great. Now I'm wondering what happens if, let's say, hypothetically, I'm creating an application, and my transaction is such that I don't

[15:00] want my ledger keys to eventually go to the archive. I want them to disappear, to dissipate into nothing, and when sending the next transaction later on, I want to send the content of the ledger key as part of that transaction, and during execution the network itself would use that as the source of truth instead of going to the archive. From a security perspective, that should be equivalent; however, it gives me, as a developer, the power to have my information not be stored on every node of the network forever. It basically moves that power to me,

[16:00] whereas the network is just storing the state, if that makes sense. So, if I understand your statement correctly: the network is storing the state but not the key, and the user is responsible for maintaining their own keys. Is that what I'm understanding? Not quite. Basically, I'm suggesting that the archive is fine, and I realize that for the classic use case you want to keep storing the state for everyone. But some people are very sensitive about their information: they might want to use the network to store the state, with all the cryptographic proofs and everything, but without the data itself, and let the data be more ephemeral. Okay, I understand. I think

[17:00] this is probably possible, and correct me if I'm wrong on this, with just the current smart contract interface; I don't think we need to special-case this in the expiration system or at the protocol level. For instance, if you cared about this, you could have a smart contract that takes a hash payload and stores that hash payload as one of its data members, and this hash payload could be the checksum, a cryptographic hash, of data that the contract developer keeps off-chain somewhere. So I don't think this has to be an expiration-specific or protocol-specific special case; I think the current Soroban SDK already supports this. It's just that the onus is on the developer to build such a system. So you're saying it would already be possible to implement it independently. Yeah, because what you could do is just have a smart contract that defines some key space, and then

[18:00] the values of those keys are cryptographic checksums of data stored off-chain. I don't think we need to special-case that, or should special-case that, because the tools already exist to build a contract that does exactly that. Interesting, okay, that sounds interesting. Thank you. Garand, regarding the fact that fees change based on the size of the bucket list: what is the expectation? How are users or developers expected to make decisions? Obviously, they want to pay rent for a specific amount of time, and they don't have access to this information,
Garand, regarding the fact that fees change based on the size of the bucket list: what is the expectation? How are users or developers expected to make decisions? Obviously they want to pay rent for a specific amount of time, and they don't have access to this information, if I understand correctly. Yeah, so this is one of the open questions: do we define rent in terms of ledger time? When we get to temporary storage in

[19:00] a bit I will talk a little more about this, but I think from a market perspective, and from a system performance perspective, it makes sense for the fee to be variable based on bucket list size, so we can have some sort of upper bound on bucket list size via this increasing fee rate. But from a user perspective, I do see how there's a significant advantage to having a definite estimate of how long something will live in terms of ledgers, instead of this ephemeral fee where something might last five months, etc. I think what we decided, listening to some of the developer feedback on the Discord channel, is that for temporary entries it is important to have a definite, well-defined lifetime in terms of ledgers. That is, if you define a temporary entry with a TTL of 128 ledgers, it should live exactly 128 ledgers, no less, no more. So for temporary entries,

[20:00] especially anything that might be a temporary allowance or anything that has security implications, it is very important to have a definite ledger lifetime, and the plan is to offer a set amount of ledgers. In order to keep people from abusing that and getting free or cheaper rent, we would have to have some upper bound on how long a temporary entry can live; that could be three months, six months, something like that. As far as the fee is concerned, there are two options. We could take a best guess at what the fees would be over the lifetime of that temporary entry: say the current rent fee, and this is just an arbitrary number I'm throwing out, is two XLM per ledger; we pretend that stays constant and charge this entry two XLM per ledger over its entire lifetime, and that's the

[21:00] fee of the temporary entry. That's one way. The other way: we know the upper bound of the fees, because the bucket list can only grow at a certain rate. So another possibility is to charge the upper-bound rent, as if the bucket list were to grow as much as possible every single ledger, and then, whenever the temporary entry gets deleted, refund the difference. Essentially, for temporary entries there will be some sort of fee model to ensure the entry lives a very specific number of ledgers. Now, for the recreatable and unique storage types that can be archived, I think, and again I'm open to developer feedback on this, it matters less that they exist for a specific number of ledgers. So essentially, if you have recreatable storage and, say, you

[22:00] define 128 ledgers' worth of rent, and then for some reason rent goes up and it dies at ledger 125, you can still go and recover that. I think it would be annoying from a user perspective, and maybe not the best UX, but it is a recoverable entry.
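As a toy illustration of the two temporary-entry fee options just described (flat best-guess versus worst-case-plus-refund): all numbers are placeholders, and the linear worst-case growth model is an assumption made purely for this sketch.

```rust
/// Option 1: best-guess flat fee -- assume the current per-ledger rate holds.
fn flat_fee(current_rate: u64, ttl_ledgers: u64) -> u64 {
    current_rate * ttl_ledgers
}

/// Option 2: charge as if the bucket list grows at its maximum rate every
/// ledger, then refund the difference when the entry is deleted.
fn upper_bound_fee(current_rate: u64, max_growth_per_ledger: u64, ttl_ledgers: u64) -> u64 {
    // Assume the worst-case rate ramps linearly; approximate with the
    // average of the first and last per-ledger rates (arithmetic series).
    let worst_final_rate = current_rate + max_growth_per_ledger * ttl_ledgers;
    (current_rate + worst_final_rate) * ttl_ledgers / 2
}

/// On deletion, the user gets back whatever the actual cost didn't consume.
fn refund(charged: u64, actual_cost: u64) -> u64 {
    charged.saturating_sub(actual_cost)
}
```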
And so there is no long-term, permanent side effect of having an entry archived slightly earlier or slightly later, whereas a temporary entry is permanently deleted: if you access something that has 128 ledgers' worth of rent and it's gone on ledger 126, it's permanently deleted and there's no recourse. So I think, because recreatable and unique storage isn't being deleted but is being sent to the archive, the bounds don't matter as much, especially since recreatable and unique storage aren't intended to be used for these ephemeral,

[23:00] auth- or security-sensitive purposes, so there's not a security risk there. From a user perspective, I think what makes sense is for the interface to expose paying rent in terms of ledgers, because that's the language developers will understand. So what we'll probably do is say: okay, I want to bump this by a thousand ledgers, or ten thousand ledgers, or whatever, and we would take the current bucket list price and do an estimation. What we could also do is add an optional parameter to the rent bump that says: I want this much wiggle room. So, say, I want ten thousand ledgers' worth of rent, and then put another 200 XLM in there just in case. I think this works pretty well, because even though the rate is variable, the growth is bounded, and I don't expect it to grow or change quickly. And I think

[24:00] that if it's an entry that's being accessed often, and you're doing this if check on every access, which is the recommended design paradigm at this time, then as long as that entry is being used often it doesn't really matter if, say, you're five or six ledgers behind where you thought you were going to be; you can still do that check, catch a frequently used entry in time, and bump the rent again. So I think that's what makes the most sense: temporary entries have strict time bounds, while for recreatable and unique entries you pay rent in terms of time but the bounds are not strict. Garand, I was wondering if I could also mention something, or maybe even ask you about this. Yeah, sure. It's just that you've been talking about these sort of arbitrary numbers of ledgers, and, if my understanding is correct here, there's no real future in which we're going to be deleting

[25:00] something at any point other than a bucket merge, right? I mean, we could effectively pretend it had been deleted, but it's not actually going to be deleted until a bucket merge happens. Yeah, correct. Okay, so there are only about ten possible boundaries where that happens. So it's not like you have arbitrary granularity here to practically worry about, and most of those granularities are actually quite small. The bucket list is a little funny in this respect, and a lot of the levels are actually quite short: 10 minutes, 42 minutes, 170 minutes, all the way up to level six.
So assuming you have any level of permanence to your problem, that you have a thing you want to keep alive for any serious amount of time, you're probably only

[26:00] dealing with level eight, nine, or ten granularity. Probably nine or ten, where level nine granularity is basically seven days and level ten granularity is basically 30 days. So as far as I can tell, from a user interface perspective, all the possible values you could set rent to are practically going to collapse into some number of weeks or some number of months, and you'll just pick a week-or-month number. You can imagine there being some really complicated pricing mechanics in here, but practically speaking, level ten merges happen about once a month, and those are the only times at which you're likely to be talking about being expired. Does that fit the user interface model you have in mind, or am I just missing the point? Yeah, no, that's true. We

[27:00] only archive or delete entries during merge events, and we're also conservative in how we charge rent. This is also detailed in a document that'll be shared later today; it's kind of the first CAP on this temporary storage and rent proposal. We charge rent during bucket merges, but because the fee is variable we can't be up to date on charging rent; we have to retroactively charge rent, just because buckets are immutable and merge results of buckets are produced in advance, so you can't charge rent until every ledger's rent is known. For that reason, when charging rent you lag behind one level. For instance, if an entry is live in a current bucket at level four, it has only been charged rent up to and including the ledgers that have passed through bucket list level three. It's a little complicated; let's just say we are

[28:00] conservative when it comes to archiving. So I think from a user perspective, even if the price increases and you're off by a little bit, more likely than not, because of the conservative nature of our rent charging scheme, it is significantly more likely that your entry will live longer than intended rather than shorter. All this talk about whether it gets archived a couple of hours early is the most extreme case, where you get really unlucky, or where you have a small amount of rent that gets archived at level six or seven and the bucket list grows the maximum amount possible for every ledger between creation and archival. That is very much a worst-case circumstance; in the average case your entry will most likely live longer than the rent you supplied. Yeah, and I guess what I was getting at is that it's likely to last a month longer:

[29:00] if you make it over the line by a bit, you're probably not going to get touched for another month.
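For intuition on these granularities, here is a back-of-the-envelope calculation assuming roughly 5-second ledger close times and each level merging four times less often than the previous one. The formula is inferred from the numbers quoted in this discussion (about 10 minutes at level four up through about 30 days at level ten); treat it as an approximation, not the exact stellar-core spill schedule.

```rust
fn main() {
    let ledger_secs = 5u64;
    for level in 1..=10u32 {
        let period_ledgers = 1u64 << (2 * level - 1); // 2, 8, 32, 128, ...
        let secs = period_ledgers * ledger_secs;
        println!(
            "level {:2}: merges every {:>7} ledgers (~{:.1} minutes, ~{:.1} days)",
            level,
            period_ledgers,
            secs as f64 / 60.0,
            secs as f64 / 86_400.0
        );
    }
}
```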
Right, if you're looking at that last level of merges. It's a little bit... I'm trying to understand: is Graydon suggesting that we change the interface so that, rather than arbitrary rent deposits, you actually point at which next level you want this to die on, basically? No, I'm not suggesting a change of interface. I'm suggesting that the thought process the user is going to go through to try and calculate rent will realistically only involve numbers that are multiples of 30 days' worth of rent, because the next-lowest multiple that has any meaning is only seven days. I'm saying you've kind of got a weird number system where you can target specific

[30:00] ledgers, but I don't think it's ever going to... I don't know, I need to think about this more, I guess. Sorry. But wait, you're saying they don't have to worry about being five minutes late on their rent; there will be these big chunks of time. It's kind of like paying rent on an apartment: every 30 days you'll have 30 days of expiration, rather than this super-granular thing, so it'll be easier to deal with. Is that what you're saying? That's what I was asking. But I think Garand's point is that we are going to be doing it on every possible level; it's just that we'll be one behind. To give a concrete example of this: say you have a level that contains 30 days' worth of rent, and a user pays 31 days of rent. In order to be archived, you

[31:00] have to be at zero when the merge event happens. Because after 30 days a merge event happens but the entry still has one day of rent left, the entry is not archived and goes to the next level. Say the next level is 60 days: it will take 60 days for that bucket to merge, so that entry will essentially live rent-free for 60 days while it waits for the next merge event, and then that merge event will check and say: oh, you're at zero, you can be archived now. That's what I mean by one level behind, and by a conservative archiving and rent approach. I think that's what I was getting at: if something survives a level ten merge, which happens once every 30 days (the bottom level of the bucket list gets merged every 30 days), we're not going to suddenly notice

[32:00] the next day that it's one day late. We're only going to notice that it's 30 days past due, that it ran out and doesn't have enough to survive another month, 30 days later. Yeah, that's correct. And I think almost every entry in this system is going to be in that circumstance: almost everyone who interacts with this system is going to pay at least one month's worth of rent, and therefore they're always going to have essentially one month of wiggle room in being wrong. I think that's what I was trying to get at. Yeah, and from a design perspective it's nice to have some wiggle room. But also, from a design perspective, our primary goal here is to get rid of these long-lived entries that will never be used, and so
if we are off by a month or two months, we don't really care; we're trying to limit ledger state size and get rid of these entries that last for years and don't do anything. Exactly,

[33:00] exactly. The whole point here is just to have some upper bound, and having an entire month of wiggle room is completely fine from the operator's perspective. You just don't want entries in there that have been sitting for years and are never going to be touched; those are the ones where we'd like to say: come on, can we at least put these in some kind of cold storage? And I think that raises an interesting question: can a malicious user exploit this for free rent? The answer is no. It's a little weird, but essentially, in order to get rid of programmatic exploits, where you pay just enough rent to survive to the next level and then keep doing that, in a for loop, over and over again, the way we service archives is: whenever an entry's rent goes to zero, it is eligible for archival, but we archive in batches. Essentially, we use bucket

[34:00] hashes as a source of pseudorandomness and then randomly pick batches to archive. So, to answer your question: while a user could get some free rent while they're in the level below, because we are one level behind (if a user pays five levels' worth of rent, they'll live until level six), once their rent hits zero, the archive-eligible entries are deleted in a pseudorandom order. We're doing that to prevent programmatic exploits where something like a smart contract, in a for loop, continually bumps the minimum amount of rent needed to live until it reaches that bottom level. So there is still some room for malicious exploitation, but because we service archives in a pseudorandom order, I think we're limiting bad actors.
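A sketch of the batched, pseudorandomly ordered archival described here. Deriving a seed from a bucket hash is the idea from the discussion, but the selection arithmetic below is entirely invented for illustration.

```rust
use std::collections::VecDeque;

// Entries whose rent hit zero wait in a queue; each ledger, a batch is
// archived in a pseudorandom order so an attacker cannot predict, and thus
// schedule around, the exact ledger on which a given entry is archived.
fn archive_batch(
    zero_rent_queue: &mut VecDeque<u64>, // entry ids eligible for archival
    bucket_hash: [u8; 32],               // source of pseudorandomness
    batch_size: usize,
) -> Vec<u64> {
    // Cheap seed derived from the bucket hash (illustrative, not a real scheme).
    let seed = u64::from_le_bytes(bucket_hash[..8].try_into().unwrap());
    let mut archived = Vec::new();
    for i in 0..batch_size.min(zero_rent_queue.len()) {
        let idx = (seed.wrapping_mul(6364136223846793005).wrapping_add(i as u64)) as usize
            % zero_rent_queue.len();
        archived.push(zero_rent_queue.remove(idx).unwrap());
    }
    archived
}
```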
A related question, actually, to that:

[35:00] the batching that happens for archiving. When it comes to those ephemeral entries, isn't the experience going to be similar? That would be in conflict with having an exact expiration at which an entry gets deleted. Well, what we can do is enforce, when you

[36:00] create an ephemeral entry, that you say: okay, this thing is going to live exactly some number of ledgers, and that number has to line up with the right merge boundary in the bucket list. And that's a minimum; as for a maximum, I don't think we can actually do a maximum, because if we do, that means we are now creating those big waves of deletes that all happen at exactly the same time, and that probably becomes a scalability problem. It's similar to the deletes happening for the archive events: you don't want to do all the archiving at once, because of downstream systems. You have to tell them the thing got deleted, and you can't have millions of entries getting deleted all at once. I actually disagree, and for two reasons. First off, I don't think we should limit users to the bucket list

[37:00] time bounds, because to outside users it feels arbitrary: why do you have to pick between 4,000 ledgers of rent and 16,000 ledgers of rent? That's a big difference if you can't go somewhere in between. At least how I'm imagining temporary entries is that they have an internal field that's like a death ledger, and after that death ledger, even though the entry might still exist in the bucket list, it is not accessible. Even if you find the entry on lookup, if the current ledger is past the death ledger, it's as if that entry doesn't exist. As far as the deleting goes, I don't actually think there's an issue with deleting very large numbers of temporary entries in batches, mostly because I don't think you need to emit meta for temporary entry deletion. The reason we have to do archiving in batches is that archive nodes, or state expiration nodes, or whatever we want to call them, need to ingest meta so they can store that information. But for temporary entries,

[38:00] because they have this TTL value inside of them, they encapsulate all the information about their own death, so you don't need to emit any meta. For instance, a Horizon node, and correct me if I'm wrong about this, knows at creation time what the TTL of a temporary entry is. So the Horizon node, or any downstream system, doesn't need a meta event emitted to say the entry has died, because they know exactly when it's going to die. Similarly, whenever we delete a temporary entry after it has died, you don't need to write a deletion event to the top-level bucket: during the merge you can say, oh, the current ledger is past the death ledger, just drop the entry and don't include it in the merge result bucket. So, assuming we have a death-ledger field inside the temporary entry, we won't need to emit meta and we won't need to write any delete events to the bucket list. I believe we can do this with arbitrary numbers of

[39:00] temporary entries, and we won't have issues with a very large batch being deleted at the same time, if that makes sense.
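A sketch of the merge-time drop just described: because a temporary entry carries its own death ledger, a merge can silently omit it once expired, with no deletion event. The types are illustrative, and the key deduplication a real bucket merge performs is elided.

```rust
struct TempEntry {
    key: Vec<u8>,
    value: Vec<u8>,
    death_ledger: u32, // entry is unreadable at or after this ledger
}

fn merge(older: Vec<TempEntry>, newer: Vec<TempEntry>, current_ledger: u32) -> Vec<TempEntry> {
    older
        .into_iter()
        .chain(newer)
        // Expired entries simply vanish from the merge output.
        .filter(|e| e.death_ledger > current_ledger)
        .collect()
}
```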
What that means, though, is that the downstream systems have to maintain, and scan, the entire ledger for those expired things, right? That's overhead. Yes, and I guess that's the question: do we need to emit meta or not? Even if the death ledger is included in the ledger entry, why can't it just be a logical thing: it's an entry, and core knows how to treat it properly? Yes, we can do that. It's more that if you think of Soroban RPC, or Horizon, they would have to manage that stuff themselves: either they do it on access, where they see the thing is deleted or past its expiration date and handle it lazily, or they do

[40:00] have to run a background pass of some sort to discover the things that are actually expired. I don't know how much overhead it would be to have a delete-on-access when an entry is out of date, and to also keep an auxiliary table, a list of pointers to temporary entries sorted by death ledger, with a background garbage-collection thread that runs every now and again, goes down that list in order, and deletes the entries that have died. I don't know how much overhead that would be, but my thought was that it would be a logic-bound operation and not require meta events; I feel like that should be reasonably lightweight from an ingestion standpoint.
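A sketch of the downstream bookkeeping floated here: an auxiliary index of temporary entries ordered by death ledger, swept periodically by a garbage-collection pass. The storage layout is hypothetical.

```rust
use std::collections::BTreeMap;

struct ExpiryIndex {
    // death_ledger -> keys of temporary entries that die on that ledger
    by_death: BTreeMap<u32, Vec<Vec<u8>>>,
}

impl ExpiryIndex {
    fn track(&mut self, key: Vec<u8>, death_ledger: u32) {
        self.by_death.entry(death_ledger).or_default().push(key);
    }

    // Run every now and again: drain every bucket whose death ledger has
    // passed (dead when death_ledger <= current_ledger) and return the keys
    // so the caller can delete the corresponding rows.
    fn collect_garbage(&mut self, current_ledger: u32) -> Vec<Vec<u8>> {
        let live = self.by_death.split_off(&(current_ledger + 1));
        let dead = std::mem::replace(&mut self.by_death, live);
        dead.into_values().flatten().collect()
    }
}
```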
Yeah, it depends. If we think people are going to use those temporary entries a lot, where you

[41:00] create those things and then they expire, then basically you can handle it lazily, because nobody is going to try to access them. But I think for downstream systems the implication is that the more likely scenario is you have to do some sort of garbage collection, because delete-on-access is not going to cover a lot of the cases where you would actually delete stuff. So yeah, this can potentially be quite a bit of stuff to do. But maybe I would like to hear what people on the platform side think about this, or OrbitLens, but I don't think he's around, unfortunately. Taking a step back for a second: I am very concerned about this

[42:00] bifurcation of how rent is measured between the temporary storage and the recoverable storage. Rent is already a pretty novel concept, and I think we're treading unfamiliar territory, so having different mechanisms on top of that, where temporary storage is measured in a number of ledgers and the recoverable ones are kind of dynamic, I think is weird. I understand why you want to do that, but from a user interface perspective, from the developer perspective, it's going to be pretty weird. And not only that: when a user pays for rent, they conceptually want to know exactly how many ledgers they're paying for, and this idea that things vary based on the size of the

[43:00] bucket list, which is an implementation detail from their perspective, concerns me a lot. So one possible solution is to have the rate be variable, but whenever you bump an entry's rent, you lock in the rent fee for that entry until the next bump. I was imagining this like a subscription model: if you pay monthly it might be 15 bucks a month, but if you pay yearly it amortizes to, say, 12 bucks a month, so you get a small discount. I think this might work both to provide better guarantees around the expiration date for the recreatable and unique storage types, and also to still have kind of a variable, game-theoretic aspect to it. Because my concern is that with a fixed rate, you can have people who say: I'm going to lock in this entry for ten years and put up ten years of rent; and then, if the rent fee increases

[44:00] five or six x in that time, you've gotten a ton of free rent. But if you reset the fee every time you re-bump, it kind of makes sense from a fairness standpoint, I feel, because yes, you might be getting a better deal on rent, but you're locking up more funds, so I still feel like the discount might be warranted if you're willing to lock up a year's worth of rent all at once to get this fixed rate. I think that's one solution to provide a better user experience, and it would be very easy to implement as well. I just don't know if it could be gamed, or be detrimental to the health of the network, to have these locked fees and entries that are essentially charged different fee rates based on the last time they were bumped, if that makes sense. What are some thoughts on that particular fee proposal? Yeah, I think the more you create these kinds of discount systems, the

[45:00] more you create an incentive for people to build some sort of meta-aggregator, a middleman contract that buys a bunch of space when it's cheap and then basically resells it to other people. For stuff that's read-only, maybe this is fine, maybe this works. It wouldn't work if you allow people to allocate entries full of zeros and then later get a better deal because it's an update; but in the current model we don't actually distinguish a create from an update. I think it might work, actually. I don't think there is a good way to resell storage, just

[46:00] because it's owned by the contracts, and using another contract's storage is highly problematic and probably more expensive than the rent savings. What I wanted to ask is: how much do we actually expect rent cost to fluctuate? I'm thinking about the model where we say, hey, the more entries there are in the bucket list, the more expensive the rent; but then, as rent goes up, we evict entries from the bucket list and it suddenly becomes smaller. I'm not sure whether that's a good feedback loop or not, because we have just evicted a bunch of entries to make it

[47:00] smaller. So basically there are two concerns, or questions, here: how much do we actually expect the rate to fluctuate at all, and whether it fluctuates in a way that someone can game, like timing things to hit a happy period when a bunch of entries were just evicted and rent is suddenly cheaper for you. So I think there are two things here. First, back to the napkin math: I don't expect it to grow very fast. But also, because we are evicting in batches, the bucket list shrinks at a fixed, slow rate. For instance, because of the batching system, there is no way

[48:00] for, say, a hundred thousand or a million entries to be evicted in one ledger such that all of a sudden the bucket list price dramatically decreases and you'd have saved a ton of money if you had just created your entry, or paid rent, one ledger later. That's not possible. So generally speaking, the way the fee structure is set up, you have slow growth and slow decline, and you shouldn't ever have any of these big jumps, either up or down; it's more that over time you can see a significant increase or decrease.
But between individual ledgers, or on a short time frame, you should only see gradual declines and gradual increases. Of those temporaries, though: depending on whether we allow temporaries to go all the way to, let's say, level nine, you could actually see a big drop in ledger size

[49:00] when you adopt the bucket that has all those things actually deleted. And yeah, I'm just trying to think about it from the perspective of someone who creates entries. I'm thinking about how big the downside is of making the entry lifetime basically time-based and not fee-based, as we discussed earlier, where you pay for a certain time period up front when you're bumping the rent, and we would evict the entry based on this paid period and not based on the variable fee. So even with the fluctuations in price as the state grows, I wonder how big the impact of this approach

[50:00] would be compared to doing completely dynamic rent, which on average ends up the same as if you had paid at the current rate, up front, for the whole period. I think if we say, okay, let's fix rent at some granularity, I don't know, let's say six months, some number, what happens is you end up basically biasing rent so that newer entries written to the ledger become a lot more expensive than existing ones; things get

[51:00] grandfathered in a little bit in the system. So if you pick that number, say you get a fixed rate for five years, as a way to think about what happens: you are basically locking in people who create entries now, when the ledger is small, to a really good deal. Let's say in four years the ledger is much bigger and storage is much more expensive; at that point the entries that prepaid have a very big advantage compared to entries trying to get added to the ledger at that time. Depending on how big that number is, you create

[52:00] potentially this big bias. Yeah, that's an exact description; I understand. I'm just wondering: if you bound the ledger size, the rent costs have some bound as well, right? Say the ledger reaches its bound in a year; theoretically that means that of your five years of rent, the growth should happen only during this first year, and then for four years it would be at an approximately constant rate. I'm basically a bit concerned about the math going into this ledger bounding, because at some point you are going to reach the cap, you would be evicting entries, and then it's more or less constant and you don't expect many changes. So it's kind of weird; I'm not sure I understand the mechanism. Let's say we

[53:00] are very close to the bound: what is the rent increase mechanism that pushes entries out at that point, given that we are near the bound already? Do we have it or not? Yeah, I'm not sure, actually. You're asking
whether rent needs to be a function of the ledger size? Yeah. But we also want the ledger size to be bounded by some upper bound, right? Yeah. So essentially the idea is that the cost of writing a new entry, the fee rate, becomes asymptotic towards infinity as you approach the upper bound; that's the intention.

[54:00] Yes, but how does this work in practice? What happens when we are near the upper bound: can we not create any entries at all? It's not that you cannot create entries; it's just that they become very expensive, so in practice you kind of stall. Isn't that the same fairness issue? Basically, instead of saying that entries were just created cheaper before, we say that people who were able to get entries into the ledger earlier win, and then it is prohibitively expensive to get new entries into the ledger until the older entries are evicted. Yeah, there's definitely an issue if we have, I guess we'll call it, rent control.

[55:00] I feel like there's definitely an issue if you have long-lived, rent-controlled items, because at the most extreme, if you had a lot of buy-in for five- or ten-year entries, then essentially your network would halt, because no additional entries could be created, and you'd have to wait until these rent-controlled entries default on their rent, which would take ten years at the most extreme. So if we go with this rent control approach, we'd have to make sure there's a very reasonable upper bound on how much rent you can prepay. Ten years would definitely be too much; maybe one year. But I think we definitely have to upper-bound it, to avoid the network being flooded with cheap entries that don't allow any other entries to join because it's prohibitively expensive.
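One illustrative curve with the asymptotic shape just described: the per-ledger rent rate diverges as bucket list size approaches its cap. The discussion fixed only the shape, not the function, so this particular formula is invented for illustration.

```rust
// base_rate is the rate on an empty bucket list; the rate grows without
// bound as size approaches max_size, so writes near the cap stall in practice.
fn rent_rate(base_rate: f64, size: f64, max_size: f64) -> f64 {
    assert!(size < max_size, "at the cap, new writes cannot be priced at all");
    base_rate / (1.0 - size / max_size)
}

fn main() {
    for fill in [0.0, 0.5, 0.9, 0.99] {
        println!("{:>3.0}% full: rate x{:.0}", fill * 100.0, rent_rate(1.0, fill, 1.0));
    }
}
```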
And so both the timeline and the draft CAPs will be publicly available awesome cool thank you so much well good discussion super interesting thanks everybody we will likely have one of these next week and stay tuned keep your eyes peeled for the drafts, that Garand will share obviously async feedback is also super helpful just to keep these discussions moving forward see you all soon + +
diff --git a/meetings/2023-03-30.mdx b/meetings/2023-03-30.mdx new file mode 100644 index 0000000000..9c37df688f --- /dev/null +++ b/meetings/2023-03-30.mdx @@ -0,0 +1,196 @@ +--- +title: "Contract Metadata and Token Interface" +description: "This discussion explores how Soroban should expose contract metadata for off-chain consumers, evaluates whether metadata needs protocol-level support, and reviews proposed changes to the token interface, including transfers, allowances, and authorization patterns." +authors: + - alex-mootz + - dmytro-kozhevin + - garand-tyson + - justin-rice + - nicolas-barry + - orbitlens + - paul-bellamy + - siddharth-suresh + - tomer-weller +tags: [soroban] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Soroban is a smart contracts platform designed with purpose and built to perform. + +This session focuses on two related design areas: how contract metadata should be represented and accessed, and a set of proposed refinements to the Soroban token interface. Much of the discussion centers on balancing protocol simplicity against developer ergonomics and downstream consumer needs. + +Participants weigh whether metadata should remain an ecosystem-level convention rather than a protocol feature, and whether token interfaces should stay minimal—even if that means pushing more complex patterns (like subscriptions or flexible transfers) into higher-level abstractions. + +### Key Topics + +- Contract metadata goals and scope + - Metadata defined as static, read-only information intended mainly for off-chain consumers + - Two categories discussed: + - Code metadata (embedded in WASM custom sections, e.g. build info, supported interfaces) + - Instance metadata (associated with a deployed contract instance) +- Where metadata should live + - Preference toward ecosystem standards using normal contract data entries rather than new protocol primitives + - SDK tooling can standardize how and where metadata is stored and retrieved + - Avoid introducing new protocol mechanics that must be supported indefinitely +- Cross-contract metadata access + - Considered a niche use case (e.g. reading token decimals for valuation logic) + - Most metadata consumers are expected to be off-chain systems (wallets, explorers, indexers) + - Cross-contract calls via existing interfaces are seen as sufficient for on-chain needs +- Security and mutability concerns + - Metadata that can change at runtime introduces risk and ambiguity for consumers + - Strong preference for metadata being effectively immutable or very strictly controlled + - Contract upgrades complicate guarantees if metadata is treated as authoritative state +- Rent and metadata + - Metadata entries are unlikely to be accessed on-chain, so they won’t get automatic rent bumps + - Manual rent bumping is considered acceptable since metadata is non-critical and recoverable + - No special protocol-level coupling between contract usage and metadata rent was adopted +- Token interface cleanups + - Admin functions no longer need admin addresses passed explicitly (fetched from storage) + - Strong support for renaming abbreviated functions to clearer, longer names (e.g. 
`transfer`) +- “Transfer max” / flexible transfer semantics + - Proposal to allow signing a maximum amount while letting contracts decide actual spend + - Aimed at improving composability for trades and refunds + - Ultimately deferred to keep the core token interface simple + - Current strict-send semantics deemed sufficient for now +- Allowances: persistent vs temporary + - Persistent allowances recognized as risky but widely used and familiar + - Temporary (transaction-scoped) allowances seen as safer but less generally useful + - Consensus leans toward keeping persistent allowances with strong guidance and warnings +- Account abstraction as a long-term alternative + - Subscriptions and recurring payments better modeled via account logic, not token allowances + - Concepts discussed: + - Signature-less authorization via on-ledger rules + - Extensible or modular account contracts + - Acknowledged as architecturally cleaner but higher complexity and adoption cost +- General design philosophy + - Favor minimal, stable protocol interfaces + - Push experimentation and specialization into contracts, SDKs, and ecosystem standards + - Document risks clearly rather than overfitting the protocol to edge cases + +
Video Transcript

[00:00] Hi everyone, and welcome to our weekly Soroban design discussion. It sounds like Justin has some AV issues, so I think we're going to get started without him. Today we have two main topics to discuss: metadata, and changes to the token interface. I think we're just going to jump right into it with metadata. Sid, are you going to take this? Yeah. So, broadly, metadata is just static data that can be read by consumers without calling a contract function and invoking a WASM VM. There are two levels of contract metadata. One is the code metadata: in Soroban we have this concept of code entries that contain the WASM. And then we have instances that are tied to specific contract IDs, and those instances

[01:00] will also have some metadata tied to them. For code metadata, our plan is to put it into a WASM custom section and just add support in the SDK to allow for this. For example, when you build a contract, the SDK can put in build-related information. The questions we still need to answer are more around instance metadata. The easy option is to not add any explicit protocol support and instead have an ecosystem standard for how metadata should be stored in contract data. Whoever creates the contract can follow the standard and just put certain information into that entry, and users will know where to look based off of the standard. We can build tooling around this in the SDK. The one question here is what paying rent on this entry would look like, because this entry is not expected to be

[02:00] accessed on chain; and if there is no on-chain access, then it would be very easy for it to expire. One solution is to provide a mechanism to tie the contract instance to any contract data keys the contract developer wants, so whenever you pay rent for the instance it would also pay rent for those attached keys; that's one option. Another option for instance metadata is to include an explicit metadata field in the contract instance that the contract developer can write into. This still only standardizes where metadata is stored, not how it actually looks, in the protocol. But if we do this, we can also allow contracts to access metadata for other contracts without calling into a WASM VM; we could expose that information,

[03:00] because right now contracts are not allowed to look at data from other contracts without calling into a WASM VM. You have to go through a contract function, that is, through the interface of the contract. We could instead allow contracts to directly load the metadata field from another contract instance, letting you look at information, for the token example the decimals or the token name, without calling a contract function. So that's the one thing we're still thinking about for metadata; once we figure that out, we should be able to move forward on this. Any questions?
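For illustration, SDK support for code metadata in a WASM custom section could look roughly like this. The `contractmeta!` macro name mirrors what the Rust SDK eventually exposed for this purpose, but at the time of this discussion it was only a plan, and the keys and values here are arbitrary examples.

```rust
use soroban_sdk::contractmeta;

// Written at build time into a WASM custom section; readable off-chain
// without instantiating the VM.
contractmeta!(key = "binver", val = "1.0.0");
contractmeta!(key = "interfaces", val = "token");
```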
What are some examples of scenarios in which contracts need access to other contracts' metadata? So this won't be a common scenario, but say a contract wants the

[04:00] token's decimal count for some reason. We expose that in the token interface; we have the name and decimals functions, and we did that because there is no other way to get it right now, since we don't have metadata. But if a contract wants it for some reason, this would be a way to get it. I don't think we need to optimize for the scenario where you want to read metadata from a contract without calling through a contract function, because it will be rare, and because of that it's fine to expose it in the interface and call into a contract function. But that's one example. I'd love to learn about concrete uses for this, because it looks like all the metadata uses we've been discussing are mostly for downstream systems; it's not for other contracts to consume. And if that's the case, then I definitely agree that having just a standard metadata

[05:00] ledger entry and a way to access it makes the most sense for everyone. The question is: are we losing an important optimization here? Yeah, I'm looking at the Oracle consumer interface proposal on the Stellar dev discussions; I don't know if everybody here has seen it. It basically tries to come up with a standardized Oracle interface, and it has a bunch of functions, like base asset, the list of supported assets, decimals, resolution, that I suspect may be useful on chain for Oracle users. Correct me if I'm wrong; I'm asking Mootz and OrbitLens here. So yeah, let me know what you think: it seems to me like these properties would be useful for some on-chain

[06:00] operations, and optimizing these cases would be generally beneficial.
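On chain, the cross-contract path being discussed looks like an ordinary client call. A minimal sketch using the Rust SDK's built-in token client (error handling omitted; assumes a recent SDK):

```rust
use soroban_sdk::{token, Address, Env};

// Read another contract's decimals through its interface, i.e. the
// "call through a contract function" route rather than shared metadata.
fn token_decimals(env: &Env, token_addr: &Address) -> u32 {
    token::Client::new(env, token_addr).decimals()
}
```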
Because most of this methods functions you mentioned they are actually a static right. But anyways I think, that for consistency they should be there. If we can for example expose some of this values in bathroom custom section Etc, that's good. But still I think the easiest way from the perspective of Downstream systems is to just pull or pull this information directly from the contract via the contract function call I think it's possible + +[10:00] to come up with some special cases where this where these functions will be called like much more regularly for example. If the. If there is some proxy contract, that Aggregates data from let's say a dozen of different Oracles. And then this functions can be called for example on every access to the underlying systems. But I don't really see whether or other like storage options for example control entry will be better than a control in this regard. Because it's more flexible + +[11:00] flexible I mean it's more flexible and less efficient rate you know you need to make n plus one or n plus two contract calls instead of n you are spending quite a bit of gas and, that. So against Thomas question was whether we ever meet this data machine it seems like yes this sometimes just like just explain where I'm coming from I just feel a bit of disconnected we like a lot of the contract interface isn't exposed a bunch of this meta data-ish functions and you know there are usable both on chain and off-chain and it's not really efficient to use them either on chain or off-chain, which I would consider a swipe slightly yeah an argument towards like you know having this instead exposures static metadata, that is readable by anyone writable by the owner contract. But I + +[12:00] have a strong opinion on this. But it just feels weird to me, that we like have a bunch of this static contract methods, that are kind of tied to the contract instance and you know they will always just fetch an entry from The Ledger like it seems like in his optimization for me. But you know I don't feel like it. So it sounds EMA like what you're suggesting is like a step towards you know basically like exposing kind of like API by shared memory, which I think for like non-meta-data use cases is you know is a bit dangerous like maybe the contract shouldn't actually expose you know data through like a shared memory structure and. If I look at the functions right. Now especially on like in the token contract they're mostly there for Downstream systems and I did want to + +[13:00] ask moots can you give us examples of. When other contracts actually need the decimals, that sounds a bit idiosyncratic to me yeah I guess probably the best example I can think of is in any system, that has to attempt to Value some collection of tokens, that may or may not be like fully defined and or can change in the future an example of this would be Ave at any point in time in the future it can choose to add a new token they probably don't want to assume, that in the Ethereum case all tokens have 18 decimals for example they have USDC, which I think in Ethereum only has six. So in, that event. 
If you are trying to consume some common price to compare all these Assets in you'd have to take the decimals into account otherwise your price would either be + +[14:00] severely inflated or deflated based on whatever number of decimals you've been assuming so, that would be kind of like trying or I suppose trying to Value some set of tokens would be really the only case I can think of, that decimals would be needed Unchained. But it's a fairly common one and Ave actually does, that it calls the decimals function yeah you'll some people like for example balancer does something kind of interesting where they actually normalize all the decimal values and just assume they're 18. Which is an option too. If you want to avoid trying to call decimals. But just. Because oracles don't always list prices in normal like the same denomination you would need to use decimals to ensure, that you're actually getting the correct one got it. So it's mostly kind of like normalizing data coming from oracles + +[15:00] correct yeah it just feels a little bit weird to me, that well like the functions for example are metadata conceptually right and you know it is urge, that we expose it as a function. But we are not going to use a tone chain whereas we are proposing and just you know hey let's use some standard like we have this Ledger entries is the metadata to use of chain. So actually what I mean here. So basically we have functions. But we don't use them on chain. And then for off-chain we have standardized metadata entries. Then there is a question like why do we even expose this functions in the first place or maybe vice versa why we buzzer with defining the standard structure for metadata at all. If we provide functions for, that we say like hey just you know. If you want to + +[16:00] expose any metadata exposure to this is a static function it's hopefully reasonably cheap to call off-chain anyway well I think the answer is the exposed functions today. Because we don't have the concept of metadata right. So the question is like. If we do have the concept of metadata. Then we will probably reduce the number of functions to those, that are actually usable on chain and you know things like you know the name of the token probably not usable on chain I don't see a reason to expose, that. So so like I you know I would error on the side of like not introducing new mechanics. Then then introducing your mechanics and whatever we can do with defining ecosystem standards is better than making you know protocol change, that we need to live in Forever yeah definitely I like I'm all for like you know letting more stuff to be resolved like of protocol of chain + +[17:00] just, that it's a little bit weird, that we have two ways of doing the same thing. But you know. If our guidance is hey like. If you don't ever expect this to be accessed unchanged just use a ledger key to unplug the interface and things, that's probably good enough yeah and I do think it's an optimization for Downstream systems right like we don't want wallets to like constantly be calling you know running like wasn't code to determine like you know decimals and asset names I think, that if. If we are going with any of the let's say with any static storage for this metadata it should be read only. So it's like it's set for once and never changed. Because otherwise + +[18:00] from the consumer perspective I don't really understand. When this data can be updated for example and many ways. If it's stored not in the contract data. 
But in Contra closure entry or somewhere else I think, that in most cases consumers will have to call it like every time I understand, that it's cheaper. But still. If we are decoupling this from the standard contract storage. So it's probably makes sense to ensure, that these values are you know for example. If they're created in + +[19:00] contractual entities and they should be created well during creation and the never changed so, that's an interesting point. Because we will allow for updating contract instances or sorry you updated contract instance to point to a different contract code entry, and now I think ideally you would update in a way where the metadata still holds true. But but yeah it's a completely coupled like no matter how metadata is stored with shouldn't touch it. When the source is being updated and to be clear the sort of metadata we've been talking about, that is attributed to the bathroom Source this is not like the data about like decimals or anything. Because it's not an instance it's more like this has been built this version of sdkx or this implements interface Y and not merging + +[20:00] that. So you know. When we update it yes it will be updated. But like it is expected like. When we updated yes it will be updated. But like it is expected we give the implementation migrated to a new SDK version on you send it to interface I think this is what you want to observe you're talking about code metadata right I thought yeah. But but I think yeah there was a question about like hey what. If he updates the contract implementation. So what I'm saying is, that you do not touch instance metadata and old metadata itself is expected to be updated. Because it just describes the code. So but. But do you allow for updating the instance metadata I get the contract the code metadata it depends on the approach right. If you go for the contract storage just the normal contract data Ledger entries it is updatable and the same is true for the current functions it exposes right click, that prevents you from changing the token + +[21:00] name currently. If you go for yeah another approach we can make a trade only. If you want okay. Because I think, that's what orbit just brought up okay. So it doesn't sound like we've like decided on you know. If this information. So like you know we can actually yeah someone mentioned, that the metadata we don't have metadata at the moment. But technically you can still look at the token and figure out where the balance entry or sorry where the decimal entry is stored right and what we're saying is. If you do this all off protocol. Then you would just. figure out a set ecosystem standard for where the decimal entry will be stored. Because they'd already exists today we would ideally put the decimal name and token all into a single metadata entry, that, that's like determined by the ecosystem not by the protocol right. So it's not like we're going to you know add Like A New Concept + +[22:00] of like a new type of Entry or add information, that doesn't exist today it's more about where we put the information, that already exists on chain today and just in a way where we can where it would make where it's easier to retrieve right yeah and. If I try to paraphrase what Dima said earlier at, that point I think, that the argument here is like not whether or not to add this to the contract entry itself. But assuming, that we're not like do we actually need to standardize this or you know do we just use what we have right now, which is functions. 
Because you know like more ways to access data is like more dangerous or it's it creates like a bifurcation there yeah and, that's specifically for the like your question is specifically about the cross-contract case right + +[23:00] yeah to be clear I think the cross-contract case is actually like I recognize, that there are use cases for, that. But I think they're like fairly minimal and I think most metadata, that we've been talking about is never going to be consumed by another contract yeah I agree I think we need to consider the separation of security for this to Value. So let's say. If we talk about the value, that is stored and Modified by the contract itself. Then the security lies entirely in the field of the code written by the developer but. If if we + +[24:00] introduce another way to store some metadata would be randomly or some other let's say editable metadata we need to think about security considerations here as well. So we need to provide information and strictly Define. When can will be changed and under, which conditions. Because again. If you are talking about oracle's price modify as decimals may have like disasters effects for all consumers orbit I didn't understand from, that what your perspective as you said. If we're introducing a new mechanism for accessing metadata do you think we should add we should introduce like a separate mechanism for Access I think we can introduce and it's okay. But we can + +[25:00] we should Define very strictly under, which condition it changes. So there are like a few options described in the prairie dog and I should probably focus on these options, that as I provide a reasonably access to this metadata or at least provide strict security guarantees, that this metadata won't be changed by someone in the runtime. Because contracts for example can have like other more Street security grantees. Then the deployer + +[26:00] the deployer contract or deployer account for example it's something, that should be considered the like the more we talk about this the more I feel strongly, that we shouldn't introduce like a protocol feature specifically for this and I think, that the functions yeah the cross contract calls facilitate all of our anshay needs and an ecosystem standard for metadata can help Downstream systems just know where something is instead of calling for it. But but I don't think we should introduce new protocol mechanics here yeah and to add to the concerns, that Sid has mentioned at the very beginning regarding the rent payments for those entries, that are never going to be attached on genes supposedly I think + +[27:00] that the current iteration of the Ring proposal kind of takes care of, that in less reasonable fashion. Because you can pump print and arbitrary entries. So you know. If you really care about your token contract you can just bump the rent on its metadata every once in a. While and the good thing is, that metadata is not like something, that has to be there to make your contract upgrade. So not critical to maintain rent a bit. But it should be possible. So it's not a concern as well it does have to be a manual process, though. And so I'm wondering like. If it would be wise to maybe some outside the metadata to the contract instance itself. Because right. Now you currently have a rent bump whenever you access either read access or read write access. And so I can imagine a scenario where like the contract is used on chain a lot. But then you know. If you don't manually call + +[28:00] this bump. 
then even though it's being used on chain, the metadata can get archived and default on rent. So I'm wondering if we should tie metadata somehow to the contract instance, such that if the contract is actively being used, the metadata won't get sent to the archive and you don't have to have this manual intervention. But Garand, isn't this problem relevant for any global state that a contract has? This is not something unique to metadata; even pool values in a liquidity pool will need similar mechanisms. That is true, but I feel like metadata is somewhat unique, because it's just never used on chain, right? I feel like for most contract data types, if they're being touched, or if they are relevant for on-chain operations, then they'll be accessed and they'll get the rent bump. So I feel like that's the one issue with metadata: because it doesn't have an on-chain use, it will never get this automatic bump, because it should never be accessed by an on-chain + +[29:00] contract. That's a good point. In the liquidity pool case, the data will be accessed, so it will get this automatic bump. And so I'm just worried that it's a weird interface where, especially in the new rent proposal where we have automatic rent payments, it feels strange that rent is paid automatically for anything that's relevant, but a very popular contract instance would still not automatically update the metadata entries; it just feels like a poor user experience. Oh, I mean, in the current proposal you still can bump rent manually for any ledger entry you want, which is what I referred to by saying that we have a solution for that. I'm not saying it is perfect, but since we say that this sort of metadata is for off-chain usage anyway, I don't see it as too offensive + +[30:00] that you need to craft rent bumps off chain as well, as opposed to the automated ones. I mean, yeah, it's probably not the best UX, but since the cost of defaulting on rent is really low here, and it's not impossible to recreate this entry from the archive if needed, I feel like, while it may be a minor annoyance, it doesn't seem super critical. But you can also get creative and merge the data that is being accessed on chain with the data that is purely metadata, just by implementing it in a certain fashion in the contract: you can have a single entry that mixes both kinds of data, and then everything will get bumped together, which is good, I guess. + +[31:00] And this can be developed off protocol if needed. Yeah, my general sentiment is: if this thing is still alive, there will be at least one entity that cares enough to put quarters in this meter. Okay, so it sounds like where we are at the moment is that the metadata won't be exposed directly on chain, though we will say for now that rent will just be manually bumped on these entries. Is that right? Yes. Okay, all right. I don't think there are any other questions around this, + +[32:00] so if no one else has any other questions, we can move on to the next topic. Yeah. So, the token interface: I don't have much of a doc to expose,
but basically it's a couple of things that we still haven't settled in the token interface with auth-next. Some of these things are pretty uncontroversial, I think; for example, the admin interface doesn't require the admin address to be passed to it anymore, because we need to fetch the admin from storage anyway, so we can authorize it directly. The next thing, which is slightly more controversial, I guess, and interesting, is this: with auth-next it is much easier to make + +[33:00] authorized cross-contract calls on behalf of someone, so the user can call a contract that calls a token transfer, for example, and it is a useful feature. But the regular transfer is quite limited in the sense that you can only send a preset amount, and this is the exact amount that you have signed. So if, for example, you have some trading contract that doesn't perform a full trade, it will need to come up with custom refund logic, for example if the price has dropped compared to what you have authorized. For those cases we want to introduce a function like xfer_max, or maybe something else, naming proposals welcome, and this function would basically allow + +[34:00] signing a maximum amount, and signing the address that is authorized to transfer up to that amount, but then the call itself accepts the actual recipient and the actual amount, which of course must not exceed the amount that has been signed by the user. That allows you to do something like: a user has signed a transfer of 100 XLM, and during a trade we figure out that they actually need to spend just 95 XLM to perform the trade, so the contract can do just a single token call, xfer_max, with 100 XLM authorized and 95 XLM as the actual value. So the proposal seems + +[35:00] reasonable, at least from the previous discussions. I guess the question for this particular proposal is how many extra functions we are going to have, because currently we already have xfer, and xfer_from that uses allowances, which is the next topic. But maybe we should try converging: for example, have the normal xfer behavior always be like the xfer_max I have described, where you always sign the maximum value of the transfer, and if you are doing just a simple payment, the arguments would just be duplicated; or we can have two functions. So I guess that is the question here, and I'm interested in opinions. The trade-off is either a more complex interface, or a + +[36:00] simpler interface with fewer functions where this one function itself is more complex. Just to make sure we're all on the same page: in current semantics, if you want to do a trade that is a strict send, then the current xfer function is enough, right? Yes. Okay, so if you want to do a strict receive, in which the other side of the trade is guaranteed, then the send amount can be flexible? Yes. Got it. Out of curiosity, what is common in the world of popular AMMs on Ethereum, for example: do they work on strict send or strict receive semantics? Does anyone know? No, it's not really either, sadly; you kind of + +[37:00] put in a price and you put in a slippage tolerance. So I guess, yeah, it's definitely strict send with a slippage tolerance. Got it.
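The xfer_max function discussed here never shipped in this form; purely as an illustration of the shape being proposed, a sketch might look like this (hypothetical name and semantics, with the soroban-sdk auth call assumed from a later SDK):

```rust
use soroban_sdk::{contract, contractimpl, Address, Env, IntoVal};

#[contract]
pub struct Token;

#[contractimpl]
impl Token {
    /// Hypothetical "transfer up to a signed maximum": `from` authorizes
    /// only `max_amount`, while the caller supplies the actual `to` and
    /// `amount` at call time (e.g. the 95 XLM of a 100 XLM-authorized trade).
    pub fn transfer_max(env: Env, from: Address, max_amount: i128, to: Address, amount: i128) {
        // The signed authorization covers the maximum, not the final amount...
        from.require_auth_for_args((max_amount,).into_val(&env));
        // ...and the contract enforces that the actual spend stays within it.
        if amount > max_amount {
            panic!("amount exceeds authorized maximum");
        }
        // Balance bookkeeping from `from` to `to` elided in this sketch.
        let _ = to;
    }
}
```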
But the amount you send is set. Yeah, it's just the amount you receive that is undefined, up to some limit. Yeah. Is this a function of requirements, or is this a function of, for example, the token interface? Because I know that some things are definitely a byproduct of the current design restrictions of certain blockchains, so I wonder if this is really a business requirement here, or just something that is a product of tokens having only strict send. + +[38:00] Yeah, I hear you, and it is difficult to know. This is anecdotal, but I can tell you that at the beginning of Stellar we only had strict receive semantics, and that was extremely confusing, and when we added strict send, most people kind of migrated to that. Because, and this is an assumption, I think it's a bit easier to perceive: I'm going to put this amount in, and I recognize that the amount coming out can be dependent on market conditions. The opposite, the other side, I think is a bit more confusing to people. So I don't necessarily think that having only strict send semantics is that bad; I am concerned about having an interface that is a bit convoluted and has too many xfer functions. By the way, can we please stop saying xfer and move to transfer for these functions? Will that happen soon? + +[39:00] Yeah, actually, as part of the token interface discussion, I think we should migrate to longer function names. I know that we allow up to 32 bytes; we should switch xfer to transfer, and also rename other terribly named functions that are trying to fit into that character limit. So yeah, I think this should happen. Yeah, I think in the chat everyone is excited about this. Yes, let's have more understandable, longer function names, please. So, is there a proposal here to just not do it at all and live with only a strict send transfer? Because it is true that even if you need strict receive behavior, and even if you don't use allowances, you still can do it via a transfer of the whole + +[40:00] amount and then a refund. So that's an option as well, and it keeps things simpler, and if people find this useful, we can extend the token interface eventually. Yeah, especially because we're not talking about just a standard token here; this is the token interface, so every implementer is going to have to implement this, and I do think that we need to keep it as simple as possible. Say what you want about the ERC-20 interface, it is fairly simple, and I think there's something to learn from that. This whole thing came up during the auth-next discussions, when a lot of people were interested in what happens if I need to send a volatile amount of token; but maybe that's really more of a hypothetical case, and in reality it doesn't matter much. + +[41:00] Yeah, I agree that we can just keep the interface minimal. So unless someone has a very strong use case for transfer_max, let's punt on it for now. Does everyone agree? Yeah. Any supporters of transfer_max want to speak now? Okay. Then the next topic is allowances, and I think there was some controversy about this. But basically, what I would like to have in the end, which we may not necessarily achieve,
is that things like persistent unlimited allowances are inherently dangerous; I mean, there are stories about millions of + +[42:00] dollars lost via persistent unlimited allowances on other blockchains, so I think there is some evidence that this is not the best pattern to use. That said, there's seemingly a pretty narrow slice of cases where it might be useful; it is still dangerous, but it could be useful, like the case of a recurring payment or subscription. For those cases I think that using account abstraction is the correct, quote-unquote, solution, because it enforces the actual requirements: when you are doing a recurring payment, the actual requirement is not "I allow this contract to spend as much of my token on my behalf as it wants", but "I allow this contract to spend up to X tokens during a time period of N weeks or months or + +[43:00] something", right? So the question here is whether we want to still have persistent allowances at all in the token interface, given that they come with certain risks. And another thing, again another proposal I had, is just a temporary allowance that exists only during the contract call execution, and that's mostly useful for cases similar to strict receive, or maybe even something more convoluted than that, where you want to take the token from the user and split it between several parties, for example. Again, I recognize that's + +[44:00] probably not the most compelling use case necessarily, and I have seen the exact concerns about the need for the temporary storage. But I feel like, at least if we are talking about allowances, I would be much more comfortable with temporary allowances, because the main issue with persistent allowances is that people just forget about them, and then if something is compromised, you could lose your tokens without much action on your part, whereas when we are talking about something temporary, it exists only within the scope of a single transaction. So basically two questions: do you have any arguments for or against persistent allowances in the token interface, and do you have anything against, or maybe for, temporary allowances at all? And + +[45:00] then we can decide what we do. On this concept of using account abstraction instead of persistent allowances for things like subscriptions: can you further explain how this might work? I don't fully understand that. Yeah. So account abstraction allows you to basically do signature-less transactions. Instead of signing every operation done with your account, your account may have a function that you need to authorize, such as, for example, add_subscription or add_recurring_payment, with whatever requirements seem useful, for example "spend token X up to a certain amount within a certain time window". Then you may have + +[46:00] certain options for how this time window is applied. I guess this totally depends on the business requirements and opinions, which is why the alternative, doing some timed approvals in the token itself, seems really complicated for something as simple as a token interface, because there may be different requirements for this, and recurring payments are just one example; there are other examples where you can have a session or something. But anyway.
So you have a function that says add_subscription, right? You authorize it, and it writes an entry owned by your account to the ledger that describes this subscription. Then, when someone tries to authorize something on behalf of your account, depending likely on the arguments, or whatever your contract decides to do, the point is that instead of verifying + +[47:00] some signature, the code would expect no signature at all; instead it would expect a certain entry to be present in the ledger. For example, in the case of the recurring payment, you would expect that if a contract tries to call a token transfer on your behalf without any signatures, it must have a subscription. It would then fetch the subscription from the ledger, verify that the send amount does not exceed the remaining balance for the subscription, and if so, it would allow it. Would it be fair to say the advantage of doing the account abstraction approach to subscriptions is that it lets the code live in the actual wallet and be more complicated that way, versus just being implemented as an approval mechanism in the token? Yeah. So there are a couple of advantages. One is customization: you + +[48:00] don't need to be attached to the token interface, and you don't need to restrict yourself to tokens; you might also authorize some transactions trading other entities or something, maybe a bad example, but you get the idea: it's not limited to tokens, and it's not tied to one certain way of defining the time balance, for example. Another advantage is that you are the owner of the account. Again, in the case of persistent allowances, the allowance is delegated to a third party: you have no control over it, and if it's compromised, you're in that bad situation. Here you will be screwed only if your account is compromised, but that's a general problem of any account, and at least you do not put any additional trust in the third party; you just define the rules for the third party to + +[49:00] use your funds. Sorry, go ahead. For this, it sounds like you need special accounts that can actually handle the logic for understanding subscriptions. But also you're kind of delegating: you're moving the responsibility for the subscription logic from the merchant to the account. Do we expect that merchants are going to program accounts for users to do this? I'm a bit confused. Yeah, that's definitely a general issue. While account abstraction seems like a nice idea, in reality there are already a ton of Stellar accounts out there that, I would imagine, the apps would want to build for, which is why I'm not saying this is necessarily the only solution. I'm just saying that it is + +[50:00] an architecturally good solution, and it's good from the security standpoint. And in order to mitigate the issue of, for example, the classic Stellar account not supporting anything like that, I had another proposal here, call it extensible accounts. What it comes down to is that you basically can add delegations for your account, delegating authorization from your main account to another account; think of it as adding a module. For example, you can have a classic Stellar account with a subscriptions module. Obviously someone needs to write this module; it doesn't have to be the merchant, it can be some library that provides it. So it is a non-zero amount of work, obviously, but
if it's shareable, then there is some standard, like, hey, this + +[51:00] module standard allows doing subscriptions, for example, so anyone who wants to do subscriptions just needs to make sure their account has this module. Again, this is just a proposal I had, and I think it's also applicable to the broader case of "I want to do something interesting with custom account contracts, but I'm worried about it not working". With these extensions it is possible: you don't necessarily require a user to migrate to your account implementation; you just allow them to reuse their main account address while benefiting from some new features, which I think may be a generally useful thing to have for adoption of custom accounts. The + +[52:00] extensible accounts thing is super interesting. I think it ties in really closely with contract upgrades, because accounts are generally really long-lived and you want to be able to upgrade: if the subscription interface is a new thing, or there's a subscriptions V2, you don't want to be stuck with your smart contract wallet being on a previous version. Yeah, and I guess the good thing about it is that it allows for experimentation, because, to be completely honest, I have my doubts about general adoption of account abstraction. If you just say, "hey, here is the stock account, but you can also build your own things", it seems like a pretty daunting task, and if you can at least build things in a modular fashion, hopefully there will be more use for that. Yeah. I realize this might be a separate discussion, since + +[53:00] not everyone has read this proposal. I would feel okay-ish just leaving the persistent allowances there and adding a disclaimer of "wait, don't use this unless you have to", as the simplest solution. But my stance is basically that sooner or later we should just get rid of that and provide account abstraction for it, with custom and extensible accounts. Yeah. It sounds like there's a tension here between what is secure and has proper architecture, elegant and extensible, versus what is simple and generalizable, which is what we have right now with the persistent allowances. My vote is to keep persistent allowances there, + +[54:00] because if we're going to rely on people implementing their own account abstraction, or account-abstraction-like modules for subscriptions, then I think that's a bit of a high barrier to entry. I wonder how popular subscriptions are in general, and how comfortable people are with them, especially when it's done in the form of "I just give an unlimited allowance to this contract"; so I'm not necessarily convinced that this is a super strong use case in general. But as I said, if you add some disclaimers and say that with auth-next, with our auth framework, you should be able to avoid this in like 95% of the cases, hopefully that's good enough. Because really the key thing, why I started this topic at all, is that in reality you shouldn't need allowances at + +[55:00] all most of the time, right? Yeah. I would say that subscriptions right now in crypto are definitely not there yet, but in the world of merchant payments they're huge, and we can't ignore that if we do believe in wide adoption. Yeah, definitely.
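A rough sketch of the subscription-module idea, assuming the custom-account interface that soroban-sdk later exposed; the Subscription entry, its key, and the checks are invented for illustration, and a real module would also validate the auth contexts against the spender and decrement the remaining budget:

```rust
use soroban_sdk::{
    auth::{Context, CustomAccountInterface},
    contract, contracterror, contractimpl, contracttype,
    crypto::Hash,
    symbol_short, Address, Env, Val, Vec,
};

// Ledger entry describing one authorized recurring payment (hypothetical).
#[contracttype]
#[derive(Clone)]
pub struct Subscription {
    pub spender: Address, // contract allowed to move funds
    pub remaining: i128,  // budget left in the current period
}

#[contracterror]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum AccError {
    NoSubscription = 1,
    BudgetExceeded = 2,
}

#[contract]
pub struct SubscriptionAccount;

#[contractimpl]
impl CustomAccountInterface for SubscriptionAccount {
    type Signature = Val; // no signature needed for subscription spends
    type Error = AccError;

    // Invoked by the host whenever this account must authorize something.
    fn __check_auth(
        env: Env,
        _payload: Hash<32>,
        _signature: Val,
        _contexts: Vec<Context>,
    ) -> Result<(), AccError> {
        // Instead of verifying a signature, require a subscription entry
        // to be present on the ledger, exactly as described above.
        let sub: Subscription = env
            .storage()
            .persistent()
            .get(&symbol_short!("SUBS"))
            .ok_or(AccError::NoSubscription)?;
        if sub.remaining <= 0 {
            return Err(AccError::BudgetExceeded);
        }
        Ok(())
    }
}
```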
But I guess the question here is rather: would you want a simple but insecure solution, or should we rather try to provide proper, secure support? And I don't think they're necessarily mutually exclusive, right? If we do have persistent allowances and we come up with a better mechanism through account abstraction, that's great, but I think just having something that is a simple catch-all mechanism is still worth having. Yeah, it just means we need to support + +[56:00] persistent approvals forever, and everyone who implements a token does as well, because it's part of the token interface. Yeah, which, for better or for worse, is an established practice across the ecosystem, right? Yeah. Again, coming back to requirements versus limitations: I'm reasonably certain that persistent allowances are a result of the limitations of Ethereum, right? Reliance on a single signature, a single signer, and a single call. They really are not needed most of the time. So my main concern would be really not about the subscriptions necessarily, but about people using allowances out of habit; again, they are not needed most of the time. I think + +[57:00] that's an important point about allowances in general, and as long as we can make this clear, hopefully that's okay. Yeah, I definitely agree, and I think that both in the documentation and in the wallets that implement signing these, there need to be huge, big red warnings: whatever you sign, you are actually signing an allowance, because users shouldn't be doing this on a regular basis. Yeah, as long as you say that it's reserved for some specific use cases. I was going to say, yeah, I agree the smart contract subscription model for that is definitely better and nicer, + +[58:00] but it opens a huge can of worms, in particular around UX: with the approval model you can say to the user "you've approved this amount for these wallet transfers", which is kind of easy to understand; but if you're getting into more advanced logic within the smart contract, that's a big UX challenge, explaining it to users in a way they can understand, which we're not going to solve in the next six months given everything else we have to do. Yeah, I mean, that's the same as always: there is some upfront cost before we get nice things. I'm sure people in the community are looking into that; it could be pretty nice to build on top of this account abstraction framework. But yeah, I definitely recognize that it is a pretty high entry cost, because it means wallet support and tooling support. So, not + +[59:00] sure how we will persevere. It looks like we're at time. I would say that for now it sounds like we're leaning towards keeping persistent approvals, persistent allowances, and we probably need to further discuss the idea of temporary allowances, but we'll push that to next week. Cool, well, thank you so much, have a great day, y'all, see you next week + +
diff --git a/meetings/2023-04-06.mdx b/meetings/2023-04-06.mdx new file mode 100644 index 0000000000..ed7b6d0ab6 --- /dev/null +++ b/meetings/2023-04-06.mdx @@ -0,0 +1,127 @@ +--- +title: "Transient Storage and Upgradable Contracts" +description: "Design discussion covering a proposed transient (ephemeral) storage feature and a v1 contract upgrade mechanism, weighing developer UX complexity vs. benefits, and outlining a protocol-level approach to make upgrades observable and secure-by-convention." +authors: + - dmytro-kozhevin + - graydon-hoare + - leigh-mcculloch + - paul-bellamy + - siddharth-suresh + - tomer-weller +tags: [soroban, CAP-46-2] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session focuses on two contract-platform capabilities: a lightweight transient storage concept for “only-needed-during-execution” state, and a v1-ready upgrade path for deployed contracts. The group explores where each feature helps, what it costs in complexity, and how developers and tooling would reason about it. + +The discussion leans toward minimalism and clear observability: avoiding extra storage modes unless they unlock compelling real-world use cases, while prioritizing a protocol-level upgrade mechanism so contracts shipped in v1 aren’t permanently frozen without an upgrade path. + +### Key Topics + +- Transient storage proposal: ephemeral state that avoids ledger writes and footprint inclusion, aimed at lowering transaction cost/size and ledger growth +- Primary motivating example: temporary allowances/approvals during multi-step cross-contract flows (e.g., distribute funds across receivers) without persisting allowance state +- Concern: limited concrete use cases today, and adding another storage mechanic may confuse developers (especially alongside state expiration and other storage behaviors) +- Token interface impact: desire to avoid “multiple allowance types” and extra surface area if persistent approvals remain the standard +- Related ecosystem context: similar ideas in Ethereum (transient storage opcodes) and its relevance there for re-entrancy patterns; noted as less motivating here given current execution model +- Conclusion on transient storage: consensus to remove/de-scope due to narrow benefit vs added conceptual overhead, unless strong use cases emerge +- Upgradeability motivation: without a built-in mechanism in v1, contracts deployed in v1 that don’t include self-upgrade hooks could remain unupgradeable forever +- Upgrade approaches compared: + - Storage delegation/proxy-style patterns (powerful but more manual machinery and tricky ergonomics/performance in a VM-heavy environment) + - Protocol-level “update contract executable” approach (contract updates its associated WASM hash via a host function; auth logic stays with the contract) + - Optional extension: allow executable indirection to another contract (cheap proxy-style “shared implementation” pattern for fleets of instances) +- Observability argument: protocol-level upgrades are easier for explorers/tooling to detect than bespoke proxy patterns, making behavior changes more auditable and less “hidden” +- UX/security tradeoff: discussion of including the WASM hash in signed payloads to invalidate old signatures after upgrades (extra safety vs potential user friction) +- Practical note: footprint/preflight behavior already makes some “stale view” failures likely, reducing the incremental benefit of hash-in-signature in many cases + +### Resources + +- [EIP-1153: Transient Storage 
Opcodes](https://eips.ethereum.org/EIPS/eip-1153) +- [CAP-0046-02: Smart Contract Lifecycle](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046-02.md) + +
+ Video Transcript + +[00:00] Okay, let's get this party started. Hey everyone, welcome to Soroban design discussions. Today we're going to talk about two topics: transient storage and upgradable contracts. I'm going to take over hosting, and I apologize in advance for my nasal sound, I'm a bit congested. Is Dima here? Yes. Dima, do you want to take us through transient storage? Yeah, sure. So this is really a pretty small feature, and I don't really want to spend too much time on it; if you think we don't need it, let's just drop it. But basically the idea is that with auth-next the cross-contract interactions are much more capable than before. For example, if a contract wants to do some temporary allowance + +[01:00] to distribute funds between multiple receivers or something like that, it could call the increase-allowance function and then do the corresponding xfer-from call, right? And the thing is that the allowance in this case does not need to be persisted. My initial intention, why I came up with this idea in the first place, is that I didn't want the token to have any persisted allowances. But yeah, this was a topic we discussed last week, and it seems the general consensus is that, for the time being, the persistent allowances should still remain in the token. But the original idea was: if you want to create something that is needed only for the duration of the + +[02:00] invocation, you could use this ephemeral storage, or whatever its name is. Basically its benefits were that it doesn't need to be included in the footprint and it doesn't need to be written to the ledger, which is beneficial both from the transaction size standpoint and the transaction cost standpoint, and it's beneficial for the ledger as well. The other thing is that, from the interface standpoint, it probably would be pretty simple to use from the SDK perspective, because we can abstract it away if needed behind some generic interface, or maybe make it switchable, so I wouldn't be too concerned about the SDK side. But I guess the main concern is + +[03:00] that we aren't finding too many use cases for it, at least yet, and I kind of agree, especially if we just don't want to go with temporary allowances and we go with persistent allowances. Yeah, I don't actually want to overload the token interface with multiple allowance types or something like that. So we can drop it, or we can keep it but not use it in the token; I don't have a really strong opinion. There are definitely some interesting cases one can come up with: one contract calls another contract multiple times, and then that contract may want to maintain some state. But maybe these cases are too narrow to worry about them now. I guess that's all I have to say. Got it. So it is worth mentioning that this thing has been discussed a lot in the Ethereum ecosystem, and I think + +[04:00] they're getting pretty close to actually shipping something like this; that's EIP-1153, the transient storage opcodes. In the Ethereum ecosystem it's also something that's been considered in the context of re-entrancy attacks, because it allows a contract to set a flag that it's being processed right now, and then, if there's a re-entrance, the contract can check that flag. I think for us it's less relevant right now, because there's no re-entrancy possibility at all.
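The transaction-scoped storage described here was ultimately dropped, as the discussion below concludes, though Soroban later shipped temporary storage, which is close but not identical: temporary entries live for a short TTL rather than only for the current invocation. A sketch of the temporary-allowance pattern under that later, assumed API:

```rust
use soroban_sdk::{contract, contractimpl, Address, Env};

#[contract]
pub struct Token;

#[contractimpl]
impl Token {
    /// Record an allowance that only needs to survive the current
    /// multi-contract flow. Temporary entries are never archived; they
    /// are simply dropped once their (short) TTL lapses.
    pub fn approve_temporarily(env: Env, from: Address, spender: Address, amount: i128) {
        from.require_auth();
        // Key the entry by (from, spender), mirroring a persistent allowance.
        env.storage().temporary().set(&(from, spender), &amount);
    }
}
```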
And I think that if we're going to add a re-entrancy possibility, there's probably going to be a flag in the call function that allows you to set whether or not you accept that, so I think that's less of a motivation for Soroban. My question to you, Dima, and I think I asked this in the past: aside from the allowances use case, are there any other examples that you can point to? + +[05:00] So, actually, speaking of re-entrancy, I think if we do it in whatever way, this might actually be useful, and I just forgot to talk about this. I don't think there are plans for re-entrancy for everyone, but maybe eventually, and I feel like it would be beneficial to be able to write some protection bits, even if it is controllable on the individual contract level. But otherwise I don't think I have a super good concrete example. I've been thinking generally about auth tickets for approvals, which is just a case of that; in general you can imagine something like a custom account, a smart wallet, creating some authorization tickets that might be used during the + +[06:00] execution time for multiple calls. But I guess the main issue here is that since a thing like this does not exist in a usable form yet, there aren't many users of it, because, well, there's no such feature. So I cannot come up with a very convincing case right now; I have just some vague ideas. And I don't know whether it can be implemented efficiently; maybe it can. But the important part is that, while I understand that the use cases are probably not that obvious, the maintenance cost is pretty low as well; it's not a very complicated subsystem we need to maintain. So yeah, that's about it. Got it. So I think that, given that last + +[07:00] week we decided to keep long-lasting approvals, or persistent approvals, and given that simple authorizations are handled by auth-next without actually requiring allowances at all, I would say that this is a pretty narrow use case and it will clutter the asset interface. So I definitely agree with the sentiment that we shouldn't include it, and I wonder if anyone else on this call thinks differently about that. So you're saying not only remove it, or don't make any changes to the token contract, but also remove it from the host and the SDK, right? Because the + +[08:00] tooling for this has already been implemented, right? Yeah, my vote would be to remove it, especially given that it's such a narrow use case, and also, with state expiration, we have a lot of different storage mechanics in the end, and I think that adding another storage mechanic can just add to the confusion for developers. Yeah, I agree; if we're not going to use it, and we don't have any other good use cases, I'm fine with removing it. Yeah, I tend to agree as well. It does sound like a very cool feature, but to a large degree it's an optimization, and there definitely is a complexity cost to developers needing to be aware of this thing, knowing whether something should or shouldn't be in a + +[09:00] footprint, which I guess is largely taken care of by the fact that we generate the footprints for people.
But maybe having developers needing to make a choice about what they use just seems like one more thing that people need to know and learn about, or make a decision about, and for the most part it sounds like we would be telling people, "don't use this; it isn't something that you would use except in these few situations". That is not a very strong opinion; I can imagine finding use cases for it, but if we're aiming for minimalism, then I guess we remove it, I don't really care. Is it in the current release? The release has just gone in. Okay, it sounds like we're pretty much + +[10:00] in agreement right now on removing this, so we'll go to the next topic, which is upgradable contracts. Yeah, one small thing on this: since it is in the current release, if someone comes up with a good idea for a use case, please let me know, because if there is something legitimate that people can use right now and play with, and it seems useful, we can reconsider this. Wait, that's not really correct; it's not in the release? Oh, it is not? Right, okay, that's a bit unfortunate. I guess you still can write in if you have some ideas. Yeah, and someone is asking whether it would be complicated to include post-mainnet: it will be somewhat complicated in the sense that it will + +[11:00] require a protocol release, but that is the only complication. Okay, the next topic is upgradable contracts. So, Dima, you distributed a doc a couple weeks ago that outlines three different proposals, and you've implemented one of them. Can you give us a brief overview of the problem and the different solutions, and focus on the one that you're proposing? Yeah, sure. Basically, I want some contract upgrade mechanism to be shipped in v1, just because if there is no such mechanism, then contracts written in v1 will stay unupgradeable forever, because no matter what mechanic we come up with, it will need to be controlled by the contract itself, and if a contract + +[12:00] doesn't have an upgrade entry point, there is no way it can upgrade itself. It is of course possible with the current toolset to build something like a proxy, but the issue is that the state of the proxy contract has to belong to the top-level contract ID, and currently it's not possible to achieve that. So we need something to address this issue, which is why I wrote a proposal. The first option was: we could add some sort of storage delegation mechanism, which I think someone said exists in Solana, or maybe some other chain, I could be wrong here. Basically it's just one of the approaches that is possible, where you say, hey, I + +[13:00] allow the contract I'm calling to actually use my storage. It's not super straightforward to implement, I guess, and it is also going to be relatively manual: besides the storage delegation mechanism, you still need to write all the forwarding machinery, which might be overkill for some cases. Then there are the second and third options, where option three builds on option two. For option two, what we do is just allow contracts to update their contract executable entry, as we call it now, which is basically how a contract says "this is a reference to my implementation", referencing the hash of the installed wasm blob. So + +[14:00] this proposal, which is the one already implemented, is just to allow contracts to call a host function and update the hash of the wasm blob to some other value, which performs an update. Basically, the update logic and authorization logic is on the contract developer, which makes sense, as they may want different authorization approaches to this; but the main point is that it is now possible to just upgrade the wasm. As an addendum to that, as some of you folks suggested, and I have this implemented as well, we will add the wasm hash to the standardized signature payload for auth, just so that you + +[15:00] basically get some feedback if something has been updated while you were sending the transaction, which I guess might make sense if something had been signed a while ago; that's a kind of safety measure that comes with this. Obviously there are some security concerns, but my guess is that this is not significantly different from other contract vulnerabilities: funds can be stolen, there are various angles to attack contracts, and so on and so forth. So it's ultimately on the + +[16:00] contract developers. And the good thing about this approach is that it is very easy to tell that a contract is upgradable; it cannot be obfuscated, as far as I understand, because you need to call the host function, and you can't really obfuscate host function calls, so it should be visible even from the wasm blob that the contract is upgradable. Also, as a benefit compared to the first option, where we just delegate the storage access, this approach makes it easy to extend the interface. For example, say you have some NFT contract, and after a year the NFT standard has been ironed out and some new functions have been added to it; you may just update the implementation of your NFT and provide some new functions. That's maybe a bad example, I don't know if that happens or not, but you get the idea: it's possible to extend the interface without any special + +[17:00] machinery, and it's compatible with the host environment. Option three is just another feature on top of that, where we allow a contract, besides pointing at a wasm hash, to point at a different contract and use that different contract's wasm hash as its implementation. This gives us basically a very cheap proxy pattern, where it is possible to do centralized upgrades of the implementation for a bunch of contracts. This is how I have noticed some bigger contracts work, like Uniswap, I believe, where they have some implementation contract, and it can be updated in various ways, for example via some voting or something like that. So, + +[18:00] again, authorization is on the contract writers and can be as complex as needed, and as soon as the implementation is updated, every instance of the contract gets updated immediately, which is both fast and cheap. It is of course a more narrow use case than just upgrading a single contract instance, but arguably it is pretty important as well, because while it affects maybe a smaller fraction of contracts, those are likely the most meaningful contracts, like Uniswap; basically this will probably concern token and AMM implementations and stuff like that, because these tend to have many instances that probably should be controlled by a single entity a lot of the time. + +[19:00] So yeah, these are the options. For option two, all the pieces are there, so it's only a matter of review, and extending it is not hard either. So I don't see a very good reason not to do that, besides maybe some additional UX complexity, but I saw some folks had concerns there. So, I do feel like there's one possible solution that is not mentioned here, and it may be because it's really bad from a performance perspective: you could always create a simple contract that just stores things, like a storage access contract, and then the + +[20:00] proxy can pass that as an argument to whatever implementation contract it uses right now, and that's something that's pretty much doable now. Yeah, I guess that's an option that kind of supports option one. It still wouldn't help with the interface updates, as I mentioned, and of course this is really slow, to the point where you're attaching the storage wasm an extra couple of times. Okay, separate from the performance issue, I am curious how authentication, auth, would work in that flow: would the auth be between the two contracts, or + +[21:00] how would it work? The nice thing about auth for users, or with contracts, is that you're sort of scoping the users' access potentially; but would we be able to do the same thing for a contract that's deeper? I guess we can with auth-next. Sorry, which approach are you referring to? I'm just talking about the idea Tomer was sharing. Where the data is, I guess, doesn't really matter: you would just use your storage interface, and instead of calling storage directly you would call the storage contract, and that contract would rely on the invoker being the admin or something. So basically it is definitely doable; whether or not it's a good idea, + +[22:00] probably it's not the best idea, but I mean, it kind of works. And presumably it's a bad idea because, as we were talking about, what I described is like three separate wasm environments, right? Yeah, it's a ton of cross-VM calls, and every storage bridge would be another VM call: the read would be a VM call, the write would be a VM call, and VM calls are really expensive; that's the most expensive thing you can do. Which is why I'm kind of in favor of built-in mechanisms: besides providing some unique features, they are also much more efficient, with no additional VM instantiations, which are expensive. Right, but if we go down the path of introducing protocol changes for this, you can imagine, and again + +[23:00] I'm not attached to this idea in any way, but you can imagine a world in which we take this simple storage contract and nativize it, so that it's native code. I'm not sure how we would nativize it; it may not be possible. Well, so, is that worth doing? I'm curious why that approach would be more beneficial than solution two, like just using the host function. That's a good question. I think solution two is possible; my knee-jerk reaction to solution two is that the fact that a contract implementation can change is not trivial, especially if you look at other + +[24:00] smart contract platforms out there right now. I feel like that is going to be something that is a bit more difficult for people to wrap their heads around. I'm not sure how it is different from the proxy's implementation change; that is exactly what happens, except that instead of changing the contract that is serving as the implementation, you are changing the implementation itself, which is actually easier to trace in a generic fashion, because it's independent of any specific contract. Yes, from a user perspective I agree with you, but if you look at the USDC contract, which is a proxy contract right now, it never changes, right? It's always still the proxy contract; the state changes, and there's a different address that it forwards to, but the contract itself + +[25:00] didn't change, right? From an observer perspective, like a block observer perspective. Well, is that actually a good thing? I don't think that's a good thing, because the actual contract code, and this is semantics, the contract behavior, may change, and it is actually hard to notice that it has changed; whereas with the second solution we would emit an event, probably, and it would be very clear that the implementation has changed. I think it actually makes things more observable and safer, because you can argue, "yeah, it is just a proxy", but it may point at a different implementation, and if a block explorer actually cared about that, it would be pretty tricky to track: + +[26:00] it would need to know how exactly this proxy reference is being stored, or you would have to introduce some standardized events or something. So I don't quite agree that it's necessarily a better thing, because it creates a false impression that the contract doesn't change, when its behavior actually does change in fact; it's just obfuscated. Yeah, I agree with Dima here. I think there's basically no way for us to prevent contract behavior from changing. Contracts can just change behavior; that's their nature. If they did one thing yesterday, they don't necessarily do the same thing today, because they might have code in them that says "if it's Wednesday, do something different". There's always the possibility of a contract changing. What you're getting out of something being built into the protocol is a standard way of expressing a pattern of contract change, so block + +[27:00] explorers or anyone else can actually observe that particular type of change, which is upgrades. And upgrades are the least pathological and most expected type of contract change: someone just pushes an update to their contract, because life goes on and they've added features or fixed bugs; that's something everyone does in software. And so I think expressing it in protocol is actually a good idea. I'm in favor of two, or even three; I think three can be layered on top of two, like it can be done as a future extension. So if we're going MVP, we could just do number two initially, and then later have an additional flag on the thing for an indirect reference rather than a direct reference, and that's not the end of the world. But, I mean, I'm a broken record, I always want to build stuff into the protocol, so this is no surprise coming from me.
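The option-two mechanism described above is essentially the host-function shape that shipped; a minimal upgradeable contract under the later soroban-sdk naming (assumed here), with an admin gate standing in for whatever authorization scheme the developer chooses:

```rust
use soroban_sdk::{contract, contractimpl, symbol_short, Address, BytesN, Env, Symbol};

const ADMIN: Symbol = symbol_short!("ADMIN");

#[contract]
pub struct Upgradeable;

#[contractimpl]
impl Upgradeable {
    pub fn init(env: Env, admin: Address) {
        env.storage().instance().set(&ADMIN, &admin);
    }

    /// Swap this contract's executable for a different installed wasm blob.
    /// Authorization is entirely up to the contract; an admin check is just
    /// one option (it could equally be a vote, a timelock, etc.).
    pub fn upgrade(env: Env, new_wasm_hash: BytesN<32>) {
        let admin: Address = env.storage().instance().get(&ADMIN).unwrap();
        admin.require_auth();
        env.deployer().update_current_contract_wasm(new_wasm_hash);
    }
}
```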
But I'm definitely on the "do it at the protocol level" side; 100% of contracts are going to want to upgrade themselves at some point, so it feels like supporting that is quite natural. + +[28:00] Yeah, I think that was a really good explanation from Dima and Graydon; I've changed my mind. Also one more point: you mentioned nativizing the contract, and I don't think we have a good user story for that, but I think this functionality probably can be leveraged, because I'm not 100% sure we will ever be able to make current contract implementations native without a lot of footguns, especially with alternative implementations of the same interface that are slightly different. Which means that in the future you could say "update the current contract" not to a wasm blob, but to any different implementation. So that's another consideration and another benefit of having this in protocol versus just something that folks + +[29:00] build with contracts. There's one aspect of the way we're implementing this that I saw getting discussed in Discord: there was conversation about changing how auth worked with updating, so that signatures for auth included the wasm hash itself, so that if an update occurs, everyone's signatures get invalidated. I'm curious, I guess, what the motivation for that is. Yeah, to be clear, I am kind of neutral on that idea; I think you can either do it or not do it. The motivation was mostly about the security implications: it adds + +[30:00] some degree of additional confidence in what you're signing, coming back to the earlier question of "is this the same contract": well, this way you kind of can be sure. And I think this makes the most sense when you are concerned not about the implementation changing to something that is clearly malicious; say there was a token balance, and after an update the admin can now transfer your whole token balance elsewhere; well, they probably can do this anyway, but you get the point. So we are not talking about malicious upgrades here. But let's say you just want contracts to be audited when they are updated, like you do not trust contracts by default. Say we have some database of trusted contracts, and if a contract is getting + +[31:00] updated, chances are it won't immediately get into this database. We could connect your wallet to it or something like this, which kind of makes it reasonable to just sign for a certain implementation, and if it has changed, then you will basically get to learn about this, or something like that. So your wallet will be more aware of what you are signing, and there are more possibilities for audit. Of course, this is all just theory, I don't know if this will happen in reality, but this is something that we enable if we add the implementation hash to the auth payload. Yeah, because I wasn't sure if this was part + +[32:00] of the core idea, which, it sounds like, is something we're just sort of considering on the side.
Yeah, I think my concern about it is that it sounds like something that an advanced user would care about, not the general user, and to enforce something like that on the general user I think would actually create problems. The moment a contract upgrades, anybody who's currently submitting a transaction is going to fail; the vast majority of those users probably don't care, and they're going to have to go re-sign something that they would have been happy to go through anyway. And I think we always have the weak link that a contract developer could just implement the proxy pattern, so that the thing that you're signing doesn't change, but the thing with the logic actually will. Yeah, sure. As I said, it's + +[33:00] more about the question of trust. Basically, let's say you have some swap contract that doesn't actually hold any balances, so it doesn't really care if someone can suddenly drain funds from it, but you do care about its implementation. And again, this kind of needs some mechanism for accessing audited implementations; of course it makes zero sense without that. It makes some sense for the cases when you have some contract that per se doesn't matter much: because, yes, a custom proxy pattern could be implemented, but then would you trust that contract in the first place, right? Yeah, talking about this use case where you worry about the contracts being audited, + +[34:00] right, that's something I don't really know for now how it would work, because in general, having some trusted contract implementation databases seems like a good idea, but those databases will need to be built by the community, and I don't know to what degree they would cover all the cases. So, as I said, I'm kind of impartial on this as well: I understand some arguments for it and some against it; for some it will be just an annoyance. Not sure; maybe we should ask for other folks' opinions, because they were advocating for it, and maybe they have more arguments as well. Yeah, so I don't disagree that this probably has some utility, but I feel like there's an opportunity here + +[35:00] for us to implement this in such a way that it's not required. If we make it part of the signature, then in theory everybody has to buy into this model of being very overcautious, basically buying into "I'm only going to use something that I have vetted". Whereas if we find a different way to implement that requirement, such that it's not actually part of the signature but maybe part of something like the footprint: maybe we have something like the footprint that lists out the wasm hashes of the contracts that you're invoking, and the protocol just checks that. So it's not part of the signature for the actual invocation, but it's part of the signature for the transaction, maybe, I don't know. Yeah, actually, that's a great point, now that I think about it: you kind of... oh, it's not you, it's the transaction source who is signing the footprint. + +[36:00] But that's an interesting point: if the contract updates, the transactions will fail anyway,
because they are accessing a different ledger entry. It's just kind of interesting, right? Of course, the problematic case needs to happen in the short time window between the pre-flight snapshot being taken and the contract update; only in that window will the transactions still go through with a stale view, and otherwise they fail. So I don't know if it would necessarily be part of the footprint, but maybe something like the footprint, if it can't be part of the same thing, where you could list out "these are the contracts that I'm willing to have executed in this call". Right, yeah. But, truly thinking about it, it seems like for the window where you would sign something, you could just do this off-chain, right? Maybe you shouldn't really worry about polluting the transactions with this, because if you really + +[37:00] care about that, and your wallet has some logic with some base of trusted implementations, it can be maintained by doing some read-only requests to ledger snapshots that are presumably fresh. And probably, from the security standpoint, it's not going to be really that much different from signing the wasm hash, because the time window where the contract has been updated but the snapshot of the ledger hasn't been updated kind of coincides with the pre-flight call anyway. So your transaction will still probably fail because it has an invalid footprint; maybe the signature shouldn't even be concerned about that. A lot of things really need to align for you to sign a call without knowing that the contract has been updated and actually execute it, and I'm not sure that's + +[38:00] possible. So yeah, I think that's maybe a good point, and we shouldn't worry about it too much; it seems like just the straightforward implementation is reasonably safe, if you're really concerned about what contracts you're calling. Are there any other open questions, Dima, about the implementation of proposal number two that you want to consult the group on? Actually, I think it's super straightforward. Yeah, I think it's pretty easy to use and doesn't introduce too much extra complexity either. So yeah. + +[39:00] Okay, are there any other questions from anyone else on stage or in the audience? Okay, this has been very productive. Thank you all, and we can keep chatting on Discord if you have any further questions. Have a great day, y'all + +
diff --git a/meetings/2023-04-13.mdx b/meetings/2023-04-13.mdx new file mode 100644 index 0000000000..5248551a8b --- /dev/null +++ b/meetings/2023-04-13.mdx @@ -0,0 +1,336 @@ +--- +title: "Getting Started with Soroban and State Ejection" +description: "Design discussion and workshop covering Soroban state expiration, rent mechanics, automatic and manual rent bumps, temporary entries, and hands-on demos for writing and testing stateful smart contracts with Soroban tooling." +authors: + - anuhya-challagundla + - dmytro-kozhevin + - garand-tyson + - graydon-hoare + - kalepail + - leigh-mcculloch + - morgan-wilde + - nicolas-barry + - paul-bellamy + - siddharth-suresh + - tomer-weller + - tsachi-herman +tags: [soroban, tutorial, CAP-46] +--- + +import YouTube from "@site/src/components/YouTube"; + +## Rent Bumps and Temporary Entries {#part-1} + + + +This session explores how Soroban manages contract state over time through rent, expiration, and temporary entries. The discussion focuses on reducing ledger bloat while preserving developer ergonomics, walking through how rent is charged, how entries are kept alive, and how developers and users interact with these mechanisms. + +Participants also dig into open design questions around rent bumping: what should happen automatically, what should be explicit, and where responsibility should sit between protocol, contract developers, wallets, and transaction builders. The goal is a system that is safe by default, flexible for advanced use cases, and understandable for developers and users. + +### Key Topics + +- Overview of state expiration and rent: entries must maintain a non-zero rent balance to remain usable +- Automatic rent bumps applied on access to contract data, contract instances, and WASM code +- Motivation: frequently used entries stay alive without manual intervention; rarely used entries naturally expire +- Manual rent bumps for infrequently accessed but latency-sensitive data (e.g. long-term token balances) +- Design tradeoffs: + - Manual rent bump operations that specify ledger keys directly + - Exposing a rent-bump host function to contracts to abstract key management +- UX and safety concerns with contract-controlled rent bumps (griefing, accidental overspending of refundable fees) +- Debate over responsibility: + - Contracts know which keys belong to a user + - Users or wallets should decide _how much_ rent to pay +- Emerging middle-ground ideas: + - User-supplied “auxiliary rent bump” values combined with contract-selected keys + - Leveraging transaction footprints to bound and make rent behavior observable +- Default rent for newly created entries: + - Minimum required to survive initial bucket levels + - Optional immediate bumping via transaction metadata rather than contract logic +- Deletion semantics: + - Outstanding rent balances are burned on deletion to avoid refund gaming + - Rationale compared to other chains’ storage refund models +- Emphasis that rent affects performance and availability, not correctness or security of unique/temporary state + +### Resources + +- [CAP-0046: Soroban system overview](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046.md) + +
Video Transcript

[00:00] Okay, let's get started. So happy Thursday, everyone. This is the Soroban Design Discussion, an open protocol meeting in which we discuss design decisions for the Soroban smart contracts platform, to be launched very soon. Today we're going to be talking about state expiration, so without further ado I'm gonna hand it over to Garand. Yeah, so I know we haven't talked about state expiration in a while, but before, I think, we were talking at a high level about what we wanted the interface to be, and now we're actually starting to implement pieces of it. And so the timeline for this is: first, we're going to start charging for rent on futurenet, but not actually have any penalties for a rent balance of zero or negative, just so we can make sure that the rent is being charged correctly and that sort of stuff. The next step will then be to add ways to bump rent, but again, still with no consequences on futurenet. And then, once we have the rent bumps

[01:00] and the rent charging correctly implemented, we'll actually enable consequences, and essentially what that means is that an entry will need to have a non-zero, non-negative rent balance to be usable. But we still won't actually be sending entries to the archive and deleting them from the ledger for some time. In addition to that, once the rent system is in place, we'll then roll out temporary entries, which are the entry types that have a TTL, that expire after a certain number of ledgers, and the TTL is variable. And so I know we've talked about this at a high level, but we're actually starting to implement this rent stuff, and so there are a couple of open questions, both in the rent proposal and in the temporary storage proposal, that we want to talk about in detail. So, when we initially talked, we didn't have automatic rent payments on access, and this was an implementation issue with the way the bucket list is structured. However, we've now changed the way in which we actually record rent balances on ledger, and so we can bump

[02:00] rent on every access. And so, with that in mind, we've changed the way that we structure rent payments. How it currently looks is: every piece of data accessed in a smart contract call receives some small baseline rent bump. This isn't expected to be a very large rent bump, but it's something small and automatic. Now, this includes not only contract data that's accessed, but also the contract instance itself and also the contract wasm code. And so this also allows you to bump the smart contract itself and the smart contract code without needing any extra intervention. And because this is a small incremental bump that's applied automatically, it means that smart contracts and entries that are used frequently don't really need to worry about rent. You know, something like USDC that's used every day will have enough rent balance via these automatic bumps to never have to worry about doing a manual rent bump. And so this automatic interface is a lot easier and covers a lot of the weird

[03:00] cases and UX pain points that we were seeing before, when we didn't have these automatic rent bumps. Now, the open question here is: in addition to having these automatic rent bumps, what interface do we want to expose for the quote-unquote manual rent bumps? And here we have two schools of thought. So, what is a manual rent bump? Say you have an entry that's not accessed very often:
you have, say, a token balance that you only use every six months or once a year, but when you use it, you want it to be accessible very fast, and so you want it to not be in the archive and to remain on the ledger. Automatic rent bumps are not sufficient for this use case, because you're not accessing it very often, and so the automatic rent bumps aren't large enough to keep this wallet or this token balance on the ledger. And so we want an interface where a user can do an explicit manual rent bump instead of the small

[04:00] incremental rent bump: say, I want six months or one year or ten years or something like that of rent for this entry. And so the two ways to do that are, first, via a manual operation where the invoker of the operation specifies the set of keys that they want to bump. This is the most manual approach, but it also has some benefits. One of the significant drawbacks, though, is that the user calling this function needs to know which keys belong to them, which isn't always easy. So, for instance, in a token contract you may have just one entry and one key for each user's balance; that's relatively easy to discover, you know, like whatever my balance key is on USDC. But there may be other entries that are also associated with your account in addition to that balance entry. So, for instance, a token contract may also have a nonce that's required to do transactions with your

[05:00] token balance, and if a user only knows about the token balance key but doesn't know about the nonce key, they may pay a bunch of rent on their token balance and have that active on the ledger, but then in six months, when they want to do their transaction, they didn't pay rent on the nonce entry, and so they still have to wait to get the nonce value out of the archive. And so one approach to this is to allow the contract to define rent bump behavior, and we do this by exposing a rent bump host function to the smart contracts. What this allows contracts to do is to codify, on the contract itself, rent bump behavior in addition to the automatic rent bumps. And so, in our token contract example, say that there are both a token balance and also a nonce that have separate keys. Now, I'm not saying this is the most efficient approach, but it is an approach. What the contract could do is define a "bump my account" function, and this bump-my-account function will use the host function to bump both

[06:00] the token balance and also the nonce value associated with the user. And so, in this way, the user can just call the bump-my-account function and bump their account by six months, a year, the specified amount, or however much is codified in the smart contract. And this allows the smart contract developers to abstract away a lot of the rent interface from the user. So the user can still bump rent manually, but doesn't need to know exactly which entries belong to them, which is a significantly easier user experience, and it also allows the contract to define some sort of automatic behavior in addition to that supplied by the protocol. So, for instance, the protocol might bump every access by 10 ledgers' worth of rent, but the smart contract developer might say, oh, for my use case I want to automatically bump by 100 or 1,000 ledgers. And so they can use this rent bump host function to define additional rent bump behavior on top of the automatic rent bump

[07:00] that the protocol has.
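To make the "bump my account" idea concrete, here is a rough sketch of what such a contract function might look like. Everything below is illustrative: `bump_rent` stands in for the *proposed* host function being debated in this meeting (it did not exist in the SDK at the time), and the key layout is a hypothetical example, not a standard.

```rust
#![no_std]
use soroban_sdk::{contractimpl, contracttype, Address, Env};

// Illustrative key layout: one balance entry and one nonce entry per user.
#[contracttype]
pub enum DataKey {
    Balance(Address),
    Nonce(Address),
}

pub struct Token;

#[contractimpl]
impl Token {
    /// Bump the rent on every entry this contract associates with `user`,
    /// so the user never has to know the individual ledger keys.
    pub fn bump_my_account(env: Env, user: Address, ledgers: u32) {
        // `bump_rent` is the proposed (hypothetical) host function under
        // discussion here, not a real SDK call.
        env.storage().bump_rent(&DataKey::Balance(user.clone()), ledgers);
        env.storage().bump_rent(&DataKey::Nonce(user), ledgers);
    }
}
```

The point of the design is visible in the sketch: the contract, which owns the key layout, translates "my account" into the concrete ledger keys, so a wallet only needs to invoke one well-known function.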
Now, even though this can provide a better user experience, it opens the door to either malicious griefing or just accidental overspending of fees. This is kind of dangerous, because it allows the contract to define arbitrary rent to take from the user's transaction fee. Now, this can't be gamed for profit, because there's no financial benefit to paying a bunch of rent for other entries; there is no way for an attacker to steal rent fees. But it could be a griefing vector, where a contract call could, say, pay one year of rent for all these unrelated entries that aren't important to you, and still take that from your refundable fee. Now, again, code audits could catch these attacks, but it could be very difficult, because, for instance, they could give an entry an inconspicuous name, like an

[08:00] "auth entry," and have every call pay one year of rent to that entry, where it could actually be a different entry type that's used maliciously. And so, even though exposing this host function could provide a better user experience, it also opens the door to these kinds of large fees that contracts can define. And so I guess that's our first open question: regardless of whether the host function is accessible via smart contracts, it will be accessible as a manual operation call. So the question is, do we want to just have that manual operation call, where the invoker has to specify every single key to bump, or do we want to also open up this host function to the smart contracts and allow smart contracts to define their own rent bump behavior? I have an opinion on this that I want to share while everyone else is thinking, because I've been thinking about this for a while,

[09:00] and I'm really biased towards just doing a single host operation that is invoked in a standalone fashion, and not going with contract-defined bump behavior, at least for now. Because, as Garand has mentioned, there really are a lot of issues with how this can be misused. But also, even if you think about the UX aspect of it: we are talking about some abstract user here, but I'm not sure it's an abstract user who does the bump; it's rather some program in between that does it, which is probably involved anyway. And, for example, if you are talking about token balances, I don't see any significant technical difference between implementing the bumping of your balance entries' rent via a

[10:00] separate operation versus a separate contract call. In some sense it needs to be coded into the wallet interface either way, and then presented to the user, like: hey, you know, your entries are about to expire. And another thing is that I really don't think there are that many entries a user should ever care about bumping; I'd say it's just balances and NFTs. And, you know, even with the ledger-key-based approach, it doesn't seem overly complicated to have support for those, and to have some basic wallet logic that just builds the correct bump transaction for a given contract interface, for example. So I don't see this as a big UX issue, but I do

[11:00] see a lot of issues with exposing this to the contracts, and it just doesn't seem like a super straightforward feature to communicate to contract developers, how they should use it; I don't think they have a good strategy for this.
So yeah, that's my opinion here. It feels slightly inconsistent to me: the current storage model definitely encourages access through host functions rather than directly through ledger entry keys, and so to have this additional system that encourages access directly via ledger entry keys sounds a bit inconsistent. Well, we kind of abstract ledger access away, right? Dapp developers do need to care about footprints, and rent handling could be improved

[12:00] with some of the ideas floating around, like maybe allowing bumps to be specified on the footprint, for example, or as a parallel entry alongside the footprint, so that you can do it transactionally; I think that's the main one. And then the question is whether we put this responsibility on the contract developers or on the engineers who build the transactions, and I'm saying it should probably be on the side of whoever is paying for the transaction, because it's they who are paying, and, really, for a lot of contracts there is no single strategy the contract creator can realistically define in terms of how the bump should be done. Well, kind of, but in the end it's the entity that pays for the transaction who needs to decide what to

[13:00] do with the rent bumps and how large the bumps are, right? But it's the contract developer who has the full understanding of what in the contract state actually relates to a specific account, right? So where will this information be captured? Well, yes, but I'm not sure the contract needs to actually do the bumps. Like, I can see the contract just exposing the entries that need to be bumped, but I'm really concerned about doing the bumps themselves from the contract. Having some metadata that says, hey, you need to bump these for a given address, is fine, but fully programmatic access seems able to do much more, and a lot of that doesn't seem like the right thing

[14:00] to do. Also, if the contract is bumping for thousands of ledgers, that might not be the user's intention, and this just creates some pretty confusing UX for the contract users. Yeah. Garand, a question. So, first, I do like the concept that the smart contract author can write his own function to modify the default behavior; I think that's great. I'm a little bit concerned that the smart contract developer would need to write that method; it's something I don't feel very comfortable about, because it guarantees user errors, and ideally, if we could do anything that

[15:00] would provide an out-of-the-box experience where it would just work, that would be much better, even if "just works" wouldn't be the optimal solution. Well, I think in either case, whether we expose the host function or not, it will not be necessary to define this sort of function, because we do have the protocol-floor automatic rent bump, which happens regardless. And so this is only for when, in addition to the automatic small incremental rent payments, you want additional control over how you bump. And I'll also clarify that this doesn't override the default behavior; that will happen whether you use the host function or not. So say, for instance, just as an example, the default behavior is to bump every entry by 10 ledgers' worth of rent.
If, in addition to that, you use the rent bump function to add another 100 ledgers' worth of rent to a token balance, the

[16:00] resulting rent bump would be 110: the 10 ledgers that were automatically bumped, plus the 100 ledgers added on top of that by the host function. And so, to answer your question, the automatic rent bump interface attempts to give the best out-of-the-box solution possible, with the thinking being that the most-accessed entries will have the most rent and be the most readily available. This is in addition to that default behavior, if that answers your question. Okay, yes, that answers my question, thank you. Now, I think one interesting point being raised in the chat, Dima, for you in particular, is: should we allow contracts to define initial rent balances of newly created entries? Because this kind of has the same attack surface, or I guess "attack surface" isn't the right word, but perhaps the same accidental or purposeful misuse issue as exposing the rent bump function. But it's a really poor interface if

[17:00] every newly created entry just has the minimum amount of rent and you need to immediately do a host function operation to bump the rent after creating it. So I guess: what do you think with respect to newly created entries, and should we special-case that even if we don't expose the rent bump host function? Again, I'll try to answer quickly, because others wanted to say something, but I'm still not sure if it's a good idea either. I feel like I should think more in terms of how a contract user provides the needed rent bump, maybe as an additional field, as I said, like maybe some annotation on the footprint or something. Because, really, if you think about it as a contract writer: you're writing a token contract; what should be the initial rent balance on a new balance entry? I cannot really answer this question for

[18:00] every user; I'm not sure anyone can. So my intuition is that for entries where we actually ever want to bump the rent balance, it's highly likely that it's easier to decide how much to bump it by. So yeah, I'm really not sure how contract writers would define this initial balance either; that's my opinion here. Yeah, so first of all, I just want to say it is interesting that we are discussing these multiple approaches to, essentially, automatically bumping the rent for the user, and I think whenever we have multiple things at play like this, we have the potential for it to become very confusing for developers about when they need to get involved and do something themselves versus when

[19:00] they can rely on the automatic behavior. So maybe just something to keep in mind: if we are going to have these multiple approaches, complexity is something we need to think about. But, to the idea of having the contract do the bumping: it sounds to me like it's sort of owning two responsibilities, and we're talking about those two responsibilities together, and maybe we can split them apart. The first one is knowledge about the keys, and that does really sound clearly to me like something the contract is responsible for; contracts decide how to arrange their storage, and it's not really in the user's domain to know that information.
And so it does make sense to me that, okay, the contract should be responsible for somehow telling a user: these are the keys that relate to you. I guess we sort of expose it already

[20:00] through footprints: you can look at the footprint from your transaction and say, oh, I've affected these entries, and so, if I want these entries to live on past some future point, I probably want to bump them; and you know the footprint ahead of the transaction, because you have to provide it. So that's maybe an interesting thing we can utilize. And then the second part of the contract function is deciding how much to bump, or at least that's my understanding, Garand, that the contract would actually decide how much to bump, and that seems less clear to me in terms of where the responsibility lies. Because I agree with Dima: if I'm a contract developer writing a token contract, or any sort of contract, I don't really know how long this user needs this data. You might be using the token contract for just this one transaction, or for a short period of time, or maybe you're locking up an asset for a long period of time; it's really unclear to the contract developer. So I feel like

[21:00] we should find a way to keep the decision about how much to bump in the hands of the user, while not needing the user to have knowledge about the keys, or maybe using the footprint in some way, so that users, or the applications users are using, can use the footprint to discover those keys. Yeah, I think one interesting option might be: say we have this automatic bump, and we just allow that to be a user-defined field in the footprint. So, for instance, we could say it must be at least 10 ledgers, just for the health of the network; no one should be able to avoid the automatic payments. But then you can define an arbitrary number. Now, the issue is, I think that might not be granular enough. So, for instance, say I'm creating a token

[22:00] balance for the first time for a wallet, and I want it to last six months: I say, okay, I want my default rent bump to be six months, and that would handle all the keys I touch in creating that new entry. But if we just have that one number to encapsulate everything, I would also be bumping the wasm by six months, and the contract instance by six months, because I touched those two as well, which is probably not what the user intended. And so I think I kind of agree with how you put it: there's knowledge of the keys, which is the contract's responsibility, and then there's knowledge of how much you want to bump, which is a use-case-specific thing based on the invoker. And the issue is, if we just have one parameter for the default rent bump, that doesn't encapsulate knowledge about the keys; but then, if you want to define a rent bump for every key in the read-write footprint, that again is putting

[23:00] the knowledge of the keys on the invoker, which is kind of something we want to get away from. Well, yeah, I mean, the invoker is potentially multiple parties, because there's the consumer, the end user.
And then there's an application that they're using to interact with the contract, and that application could potentially make some sort of broad assumptions. You know, maybe for token contracts an application-level layer might be able to handle that granularity: as you're pointing out, they might be able to identify, okay, the balances look like this, they have this key. It doesn't really extend well to other contracts, though.

[24:00] Yeah. So I guess, do we have any key takeaways, then? Because I agree, and I just feel like the barrier to exposing the rent interface to users can be somewhat high, and I don't know if it's a good interface. Now, again, this might be acceptable if people are interacting with the network primarily through wallets and sophisticated L2s and the like. But it just seems like a high onus to put on a user to essentially have to invoke periodic host function operations in order to maintain state that they are accessing through smart contract calls. It just seems like the most reasonable interface for the caller of the contract is for the contract to take care of the

[25:00] contract state. Is it possible that maybe we could find some... okay, so I agree with what you're saying, and so I'm wondering, when we discover what the footprint for a contract is, whether there's a way for us to include some sort of "this is what the contract recommends you use." And maybe this might get too complicated, but if a token contract could somehow say to a user, "this is the recommended rent bump for my contract," with the user still in control... Or maybe that's pointless, because then everybody's just going to do what the contract says, and we're essentially building flexibility that we don't really need. Well, maybe what we could do, actually... okay, so I think the best of both worlds is: we want

[26:00] the contract to be able to define the keys, and we want the user to define the amount. So maybe what we can do is, instead of having the footprint parameter be a global footprint bump, we define a new parameter called the auxiliary rent bump; it's just a single value inside the footprint. And then, I guess, we could expose this to the smart contract in some way, and the smart contract could call the rent bump host function, but the value of the ledgers to bump must be the auxiliary value provided by the user. And I think this still has some potential for gaming or griefing, if the contract were to use it to bump keys that are not relevant to the user, but I feel like that's just a class of bug that needs to be audited, and there are many smart contract bugs that fall into that area. But I think this might provide a good middle ground, where the contract can still say

[27:00] which keys need to be bumped and the user can supply the value. And so, say, this value could default to zero, just for safety reasons. But then, say,
if a user is accessing their contract or their token wallet, they could say: I want to rent-bump by six months everything that the contract deems should be bumped, which they signify by setting this auxiliary rent bump that's used inside the smart contract. Any thoughts on this approach? Yeah, I think something like that could work pretty well, and you could either present it how you just described, where you provide one value in the transaction, or maybe you could actually provide a value next to every footprint entry, which would functionally work the same way but give you a little bit of flexibility for some power use cases, where maybe a power user wants to

[28:00] specify explicit footprint entries. But I do think that idea starts to shift the responsibilities into the right places. Yeah, I'm not necessarily against the footprints; the same thing I'm always wondering about is how much more useful it is than, let's say, separate host functions that do the same thing, plus the contract. I'm just a little bit worried about mixing a lot of things together, right? So you have a transaction that does something useful, makes a payment for example, right? But then you also have this other, completely unrelated operation in this transaction, which is "bump the

[29:00] rent of the relevant entries by the time period you have defined." So it doesn't really need to be the same operation; maybe it is okay to just have this as two separate operations that can be composed as needed. Well, I mean, what would that look like, then? Because I think the issue is the knowledge of the set of keys that needs to be bumped, right? I agree that the per-key approach could be good for power users, but I still think we need a one-size-fits-all "I want to bump by this amount." Because, from a user perspective, "bump this set of keys by six months" requires you to define the set of keys and all that sort of stuff; but if you're sending transactions, you could say, "I want all my things to live for six months," so I can send this transaction with a rent amount of six months,

[30:00] and then the smart contract developers determine what is important to that user to fill into that auxiliary space. I feel like that works for most use cases. Again, we are kind of mixing up what the user is. If you think about how our contracts will be used, chances are there is some gap, like with some web apps that actually build the transactions for you, and where does the rent fit in? Like, if I'm writing, say, a DEX: would I really expose rent bumps in my UI to the users? How would that work? So I think that's a really good question, Dima, and I think there are two answers to this. There's one class of interactions that come directly from a dapp front end, and from that perspective you're right: the dapp

[31:00] knows exactly which ledger keys an account refers to, because it's the same developer who wrote the smart contract. There's another class of interactions, more generalized interactions: these happen either through general wallet interfaces, or maybe through various compositions.
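A rough sketch of the auxiliary-rent-bump idea as just described, reusing the illustrative `DataKey` layout from the earlier sketch. Both `aux_rent_bump` and `bump_rent` are hypothetical stand-ins for the proposal under discussion; neither exists in the real SDK.

```rust
// Sketch only: `aux_rent_bump` and `bump_rent` are hypothetical.
// The user signs a single bump amount alongside the footprint
// (defaulting to zero); the contract decides which keys it applies to.
pub fn transfer(env: Env, from: Address, _to: Address, _amount: i128) {
    // ... ordinary transfer logic elided ...

    // The user chose (and signed) this single amount; the contract only
    // chooses *which* of its keys the bump applies to, so the user bounds
    // how much refundable fee can be spent on rent.
    let ledgers: u32 = env.aux_rent_bump();
    if ledgers > 0 {
        env.storage().bump_rent(&DataKey::Balance(from.clone()), ledgers);
        env.storage().bump_rent(&DataKey::Nonce(from), ledgers);
    }
}
```

This split is the middle ground the group converges toward: keys are the contract's knowledge, amounts are the user's choice.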
Like, I'm not using an AMM directly, but through some sort of aggregator, or through a balancer-like contract that actually splits my liquidity between different contracts. So what you're describing, directly interacting with ledger keys, only happens in the one case where the origin of the transaction is

[32:00] developed by the same developer as the smart contract; but that's not the general use case. Oh, what I mean is actually not who knows the keys; what I mean is: what is the UI or UX for actually setting this up? So let's say we go with this idea where you can somehow pass some additional rent balance as part of a transaction that does something else. Then the question is, no matter who knows the keys, how would the user actually supply this number? I really feel like this should probably be a wallet responsibility: most of the time, whatever you need to bump probably has something to do with something you own, and that is something your wallet is responsible for. So I'm not sure it works well when

[33:00] some entity other than the wallet builds the transaction, because, as I said, you're doing something completely unrelated, especially when you are talking about some more complex scenarios, right? How would you even expose rent bumps to the user? Like: "You are about to trade these tokens; okay, how about also bumping some random ledger entries?" I don't know; it doesn't seem like the right UX to me, which is why I was arguing for the idea of just having a standalone process for this, which, I agree, has its own issues, but at least the user story is pretty clear here: hey, you have a wallet, for example, and your balance's lifetime; they're very connected. Maybe I'm missing something fundamental

[34:00] about how things work, but it really seems like two different things that we want to bundle together in the same transaction, and one of them is not obviously connected to the main use case. I think, regardless of whether we bundle it or not, the problem you're presenting — who's responsible for it, does the wallet just do it for you — exists. If we bundle it, the problem exists; if we don't bundle it, the problem still exists. And to Tomer's point about the different use cases, and your point about what wallets would do: I think that's fair. Consumer wallets are probably going to make some default decision, like, you want six months or a year for your balance, and then you can recover from the archive; and it's those other power users, or those other composition use cases, where the rent is going to become more

[35:00] interesting, because maybe in a composition use case you don't need that balance to have a starting rent of six months; that might be a waste of fees; maybe you only need it for a day. The thing is, for the wallet example, I think the issue is that, if we don't allow contracts to define rent behavior, tokens and wallets speak erc20, which is an interface, whereas rent bumps aren't an interface issue; they're an implementation issue. So two different tokens could both be erc20-compliant but implement erc20 in different ways.
So token A might have two entries you need to bump, whereas token B has just one entry you need to bump. So just saying, "oh hey, the middleware software is sophisticated enough to know how to bump" — I don't know if that's an assumption that can be made. I mean, for wallets we'd essentially need an erc20-style rent interface

[36:00] that defines rent behavior in a standard way. Because I just think the key structure is so integral and so implementation-specific that I'm skeptical of leaving it up to anyone other than the contract developer. There was an interesting question in the chat regarding something like a liability balance, and, as an end goal, we haven't been talking much about whether in some cases the rent payments are needed at all, because entries can be unarchived. In the case of this liability balance, for example: yes, it's maybe not in the user's interest for this entry to live long on the ledger, but if you are using the unique storage, right, it shouldn't be possible to at

[37:00] least recreate the entry, which the contract developer could leverage. I'm not really familiar with this liability-balance use case, like how to force the user to actually pay their liability, but if you want them to continue using the contract, you can just benefit from the unique entry, and it doesn't really matter if it got archived, right, because it won't be recreatable, and the user just won't be able to reuse the contract. I don't know what the other enforcement mechanisms are, but basically the point I wanted to make is: it's probably not super bad if entries sometimes get evicted from the ledger. This is kind of by design, especially if an entry is rarely accessed, and the downside hopefully isn't that huge. So probably, while we are talking here about how bumps work, we should recognize that

[38:00] we should make sure the UX for actually restoring an entry to the ledger is sane, so that even if you mess up the bumps, you're still not in too deep a trouble. I don't know if there's anything else for the liability use case. In the liability case, the liability still exists, right? It just may not be on the ledger, because it expired, and anyone can recreate it. So I don't know if that's an actual concern, as long as you understand that the liability is there: anyone can recreate it and put it back in the ledger. Or, I think "recreate" is not the correct word here; rather, "restore" it. Because, in the example you gave, essentially what would happen is: if it expired and a user tried to use it —

[39:00] if you use the unique storage interface, which you should for this particular use case — then the contract knows that the thing is in the archive, because that's one of the benefits of unique storage: it essentially provides information on whether the entries are in the archive or not. And so, through the unique storage interface, you know that the liability exists somewhere, and so the transaction just fails, but the liability is not live, because you can't check it. And so I don't think there's a security risk in the rent, because rent should never be used for lifetime management or anything like that; that's a temporary entry concern.
So I think concerns around expiration are handled via the recreatable and unique interfaces; rent is just more of a convenience factor, because how you pay rent shouldn't have any functional differences; it might just make your operation slower if you need to go restore something. But at a high level here, I think

[40:00] what I'm understanding is that we probably want to investigate putting rent information in the footprint. Whether that looks like a per-key rent or an overall rent bump or something like that is still TBD. But, if I understand correctly, Dima, that kind of fulfills your "don't allow smart contracts to game the system" while still allowing smart contracts to define some behaviors. Is investigating the footprint the idea we want to go forward with on this issue? Yeah. I'm not necessarily against this, and I see some issues with this approach, but it probably makes more sense than just asking the contract developer to write the manual bumps; that's both complicated and error-prone. Yeah, and I think we really need to

[41:00] think about the initial balances. Even though, as I mentioned, it might be problematic to define them in the general case, we should think about what a sane default value would be, even if it's not customizable. Because it's kind of an interesting problem as well, right: you don't want to create an entry that lives for a really short period of time, but you don't want to overcharge users either. Well, we actually have an implementation-specific value for this, which is, I think, at the bottom of the rent proposal, and is based on the structure of the bucket list and how archiving works: the minimum rent balance has to be enough rent for the entry to live into level six, which I believe is 4,000 ledgers. And so we can make the default higher than that, but that is the absolute minimum; just from an implementation standpoint, the minimum has to be at least four thousand ledgers. Now, if that value is too low...

[42:00] I think that's four thousand times five seconds, which is, what, like a day or something? Maybe 12 hours? I don't remember exactly. But if that's too low, we can increase it; we just can't go lower than that. But that's kind of interesting, right, because if you think about something like a token balance, you would think about months of lifetime, and five hours is way less than that. So yeah, that's what I'm saying: I guess we will need to see what the actual prices will be, because it seems really counterintuitive that the entries that are supposed to be permanent entries in the ledger are at the same time very short-lived, given that we hopefully will be able to create temporary entries that are alive for, let's say, weeks, right? But at the same time, I think there's also a valid use case for, quote-unquote, short-lived entries. For instance, if I just want to initialize a cold storage wallet: I

[43:00] initialize it, transfer a million USDC, and then don't want to use it for 10 years. It makes sense to just pay the minimum rent balance and let it go into the archive, because I'm not going to use it for a while. So I think keeping the minimum low probably provides the most flexibility.
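For reference, the arithmetic the speakers are reaching for here, assuming roughly one ledger every 5 seconds:

$$
4000 \ \text{ledgers} \times 5 \ \tfrac{\text{s}}{\text{ledger}} = 20{,}000 \ \text{s} \approx 5.6 \ \text{hours}
$$

which matches the "five hours" figure mentioned above, rather than 12 hours or a day.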
But what I was imagining from a functional standpoint is: you would probably want to just create all entries with the minimum rent balance initially, but then be able to essentially bundle in an immediate rent bump; that was kind of what I had in mind, and from an implementation standpoint it would probably be easiest. Now, if we don't provide a host function, or if we don't provide a footprint approach, that becomes challenging, because you essentially have to bundle operations at that point, which I don't think would really be possible to implement; you'd have to implement that not in Soroban but somewhere else.

[44:00] But if we have some sort of footprint approach, that would probably be the best way: just default-initialize every restorable entry with the four thousand ledgers' worth of rent, and then, if the footprint defines any additional bumps, provide those bumps via the footprint interface. Yeah, I think that sounds really good, and I'll just lean further into it. I do think — I assume — we're still talking about the contract being the one that can do that auxiliary bump. So, ideally, in the best-case scenario, the contract developer has given this thought and is acting in good faith, doing the right thing, and using that auxiliary bump only for the records that need to be bumped; so they might not exactly re-bump everything. The worst-case scenario is that they don't really think about it and they do bump everything in the footprint. So, as a user, you do sort of

[45:00] have some boundaries in place; it's not like a malicious contract can do lots of damage; at worst it can only bump everything that's in the footprint, which is of limited size anyway. Yeah, and in the case that the contract is malicious, a power user, or a power user who just thought they knew better than the contract, could always set their auxiliary bump to zero, and then, even if the smart contract tried to bump everything in the footprint, you wouldn't be screwed; you could essentially opt out of the auxiliary behavior if you had motivation to do so. Oh, sorry, Tomer. No, I just wanted to say that I wouldn't put too much effort into protecting against malicious smart contracts. If a smart contract is malicious, there are plenty of other ways it can screw you over, and so trying to protect against malicious smart contracts, or trying to protect against buggy smart contracts, is

[46:00] a fight you can't win. Yeah, actually, I wanted to say I don't fully agree with this, because, really, if you think about it, any token operations need to be explicitly signed, and my concern about all this customizable bump stuff is that you're basically doing a very implicit operation on a token, which is much harder to interpret. The other things that can really impact the user are authorized explicitly, while here you have something implicit, which is why I'm more concerned about this than about a contract just trying to, say, steal 100 XLM from you for no good reason, because that's at least very easy to trace and explain, whereas

[47:00] your paying rent for something you shouldn't be paying rent for is much harder to evaluate. Yes, you'll be able to preflight everything,
but I wouldn't just dismiss it by saying it cannot do anything; I think we can try to be a bit more conservative here. I mean, it's not like... you're signing a transaction; this is coming out of your wallet. I mean, all of these fees are coming from the refundable fee, right? Even the auxiliary rent bump comes out of the refundable fee, and so you have an upper bound. And so I feel like, if a contract is malicious or buggy, you preflight and you're like, "oh wow, this refundable fee is absurdly high," and you're not going to submit that transaction. And so, because we have this safe bound — both the parameter in the footprint that defines the amount of rent to bump, which is one bound, plus the amount of fees you

[48:00] have, which is another bound via the refundable fee — I feel like those two things bound the damage pretty well, such that the UX rewards outweigh the potential drawbacks, just because it's so bounded, and because it's fee-based and not token-based. Yeah, I mean, if it's exposed via the refundable fee in an explicit fashion, then great; that kind of addresses the same concern. Yeah, and just to be clear: all rent payments come from the refundable fee; both the automatic payments and the auxiliary rent bump host function would draw funds from the refundable fee. Cool. Just responding to the earlier comment about not focusing too heavily on malicious contracts: I get it. I think this design does lend itself well to

[49:00] separating the responsibilities so they land on the parties that are best equipped to handle them, and I think, just naturally out of that, we get a slightly better story for what a contract can do maliciously to you. But I don't think this fully solves that problem, like you said. Nor do we, nor can we. I mean, I think it puts sufficient boundaries in place that any sort of malicious behavior wouldn't be outrageous. So I think we probably have a pretty good feel for the rent bump issue. If there are any final comments... I think we have about 10 minutes left if we want to move on to a second topic. Any last parting words? So,

[50:00] we don't actually have a second topic for today. Or, I mean, it might be a second rent topic. I mean, sorry... oh, okay, there's the second topic, and then the third and the fourth; sorry. Yeah, I think it's very clear that, in terms of implementing and moving forward, obviously the bumping by ledger key is a building block that we need anyway, so we can keep implementing in that direction. And it does sound like we need to enumerate all the options for how the contract exposes more fine-grained information about which ledger keys are actually associated with an account: whether it's through the host function you described, whether it's through the ledger footprint, or maybe it's even just, I don't know, an ecosystem standard for a function you can call that gives you

[51:00] all the ledger keys associated with an account. So I think we just need to enumerate over all of these and probably revisit them. No, I think that makes sense; I think we probably need to iterate on this design a little bit, but we have some good building blocks from the conversation now. Yeah. So I guess we don't have a ton of time,
but I'll introduce the second rent-related issue; we also have a temporary-entry-related issue that we won't get to today. For rent, there's also the issue of outstanding rent balance when things are deleted. Now, I don't know how often this will actually come up in practice, as in entries being deleted with large amounts of outstanding rent balance, but initially it seems reasonable to refund the rent balance to the invoker of the delete operation.

[52:00] This provides a network incentive to delete entries, which we do want to have, because if an entry is not going to be used, we want to provide incentives somehow for it to be deleted before it hits the archive, because once it hits the archive, it's stored there indefinitely, or until restored, at least. And so, to keep the archive size small — even though it's not as big of an issue as having a large amount of state on the ledger — we still want to incentivize deletion. The issue is, if you provide rent balance refunds on deletion, you open the door to some sophisticated attacks and gaming patterns. The issue is that, even though many different users or accounts might be contributing to the rent balance, only a single account redeems the reward when the entry is deleted, and, especially since we are defining this automatic rent bump at the protocol layer, this opens the door to some interesting attacks. So,

[53:00] for instance, say you have a contract that's used very often, like an AMM or a big DEX. What they could do is just touch one key on every invocation, and it could have an innocuous name: it could be called "auth entry" or "auth check" or something like that, something that would be difficult to detect in code auditing. And then they could essentially have 10 ledgers' worth of rent, or the minimum rent balance, continually added to this object, and periodically the admin could call an admin function that just deletes and recreates the object, essentially pocketing all the rent balance. That's a malicious attack, but you could imagine even a non-malicious case where many different users are contributing to, like, some swap: the swap accumulates a large amount of rent, and then whoever liquidates the swap, or drains the liquidity pool, and deletes the entry also gets this additional rent balance that was paid for by many different users. And so, because of these

[54:00] two issues, it seems we don't want to refund rent balance on deletion; we'd just have any outstanding rent balance be burned whenever you delete something. That doesn't seem like a great interface, but there doesn't seem to be a good way of refunding rent without running into these weird edge cases that can be gamed. So, I guess: thoughts in general on rent refunds, and should we think about this in a different way? I just wanted to mention that we have considered an alternative where we would record which account created the entry and then refund that account. But, besides the fact that it doesn't avoid all the possible issues, the problem with this approach is that it's really adding a significant amount of ledger space, and it's not obvious whether the additional fee incurred, by the fact that you are writing bigger entries, would be offset

[55:00] well enough by the refunds. I mean, we discussed this in a bit different context,
but I think it still applies here: we are increasing entries, sometimes quite significantly, in order to refund someone, so it's not clear whether this is net positive or not, which is why that's probably not a good option. I would also say that that particular alternative was in the context of temporary entries, which have a single payer. I don't think the approach would work for restorable entries with rent balances, because many different users, in addition to the user that created the entry, could be paying into its rent. So, for instance, you can imagine the contract admin creating the entry with the minimum balance, and then every user of the contract bumping it up to a very large amount of rent, because it was touched frequently. It doesn't make sense that the outstanding balance should go to the

[56:00] original creator, who only paid the minimum rent. So yeah, in the context of temporary entries that works, but it doesn't really work in the restorable entry context. Yes, that's kind of fair, but I also want to say that we do not currently allow deleting contracts. Oh — this is for contract data, not for contract instances, right? Yeah. But I imagine the most common shared data is through the contract instance. Anyway, I just wanted to say that the alternative was out there, and, as you said, it doesn't solve those issues and it also has a significant cost attached to it. So I guess: are there any objections to burning outstanding rent balances? Because it seems, on paper, to not be a great interface, but I think it's probably the best and most fair way to go about things.

[57:00] Garand, how is this handled in other networks, or is this just in the context of state expiration? This is just in the context of state expiration; no other network has rent balances like this, so I don't think there's really any precedent. Right, but on the EVM you do get a refund for clearing space, right? Yeah, and you'd still get the base reserve back and things like that. But I think it's difficult, because on the EVM there's still one payer, and the refund is relatively small, whereas here the rent balance could potentially be a somewhat significant value, and there are also multiple payers, which complicates things a little bit more. Okay, so I think we're at time, and obviously I think we all need to think about this

[58:00] some more. So maybe start a thread in soroban-dev on refunds and we can revisit it later. Sounds good. Well, thank you all, and see you next week.
+ +## Write Your First `Hello World` to Ledger Storage {#part-2} + + + +This workshop segment walks through writing and testing Soroban smart contracts using Rust and the Soroban SDK, with a strong focus on developer experience. Live demos show how to compile, invoke, and iterate on contracts entirely in a browser-based environment. + +The session progresses from a basic “Hello World” contract to stateful contracts that store and retrieve data from ledger storage, demonstrating how Soroban abstracts serialization, storage access, and testing while remaining explicit about execution and state. + +### Key Topics + +- Writing minimal Soroban contracts in Rust using `#![no_std]` and the Soroban SDK +- Contract functions, return values, and logging via the host environment +- Using `Symbol` for efficient on-chain data representation +- Interactive contract invocation via a browser-based playground +- Ledger storage basics: + - `env.storage().set()` and `env.storage().get()` + - Handling optional values and type safety +- Storing state keyed by addresses instead of global keys +- Testing and iterating on stateful contracts without deploying to a live network +- Soroban’s “batteries included” philosophy for developer productivity + +### Resources + +- [Soroban Rust SDK](https://github.com/stellar/rs-soroban-sdk) +- [Rust documentation](https://docs.rs/soroban-sdk) + +
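As a reference for the storage APIs listed above, here is a minimal sketch of a stateful contract keyed by address. It is written against the pre-1.0 Soroban SDK interface shown in this session (`env.storage().get()`/`set()`); later SDK releases split storage into persistent, temporary, and instance storage, so the exact calls differ today.

```rust
#![no_std]
use soroban_sdk::{contractimpl, Address, Env};

pub struct Counter;

#[contractimpl]
impl Counter {
    /// Increment and return a counter stored in ledger storage, keyed by
    /// the user's address rather than a single global key.
    pub fn increment(env: Env, user: Address) -> u32 {
        // `get` returns None when the entry doesn't exist yet; the inner
        // Result covers type-conversion failures.
        let count: u32 = env.storage().get(&user).unwrap_or(Ok(0)).unwrap();
        let count = count + 1;
        env.storage().set(&user, &count);
        count
    }
}
```

Keying by `Address` is what gives each caller independent state, which is the step the workshop takes after the global-key version.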
Video Transcript

[00:00] Hello everyone. Paul, can you hear me? I can hear you. Yeah, is it jittery, or is it okay? Let's see, what about my volume? Is it good enough? Yeah, you're all good.

[01:00] Thank you. Let me actually get out of the camera. Hi everyone, we are getting started. How is everyone doing today? All right, give us a minute here as we get set up for our Soroban developer workshop. We are writing our first smart contract on Soroban, and we have Morgan and Paul joining us on stream here today to do this workshop. Hey Morgan, hey Paul, how are you all doing? Hey everyone, doing good. Yeah, going good here. How are you? I'm doing well. I am in the SDF office. It's been so exciting: the DevRel team has been expanding, and we're onboarding so many new developers. Soroban's really changing,

[02:00] and we're innovating, and I can't wait to see what more is going to come out of this. But all right — oh, you were at Paris Blockchain Week, right, Morgan? So what is this? That's true, yes. Here, I feel like this is what the SDF office looks like. Yes, I think we should do a live stream tour of SDF's San Francisco headquarters. I think that would make a fun stream: see everybody's work and what they're doing, just pop around and interact with people. I'd love to see that. What's with the media kit in the background? Yeah, we'll find out. Yeah, I know; so many comments here. "Cool, single monitors only." Yeah, y'all, we have

[03:00] only single monitors around the office. I will let them know we need curved monitors; we need all that cool tech. But yeah, all right, without further ado, Paul, Morgan, I will let you kick off this workshop and talk about what you presented at Paris Blockchain Week, and I'll let you start off, Paul. Cool, yeah, I think I'm presenting here. How do I make that happen on the screen? There we go. Yeah, cool, that looked like it a second ago. Yeah, that's the one. Cool. Yeah, welcome everybody. Good morning, good evening, good night, wherever you happen to be; welcome to the livestream: writing your first smart contract on Soroban. Here's what we're gonna be going through today. First off, we'll do a bit of an intro. We'll talk a bit about what Soroban is, for those that don't know. We'll have a cool hello world demo from Morgan, and we'll talk a little about why Soroban's important, why

[04:00] it matters, why it's good, why you should build everything on it. And then we'll have a bit more from Morgan on his journey and how he got to Soroban, and how I got involved with all this; and then Tyler will share a bit on how you can get involved, and then I will wrap up from there, and we'll do some questions and stuff at the end. So, first off, who are we? My name is Paul; I'll start there. I'm a staff engineer at the Stellar Development Foundation, which means I get to work on Soroban every day, building and making it happen for all of you. So I'm really excited to be here today to share some of the stuff I've been working on. What is the Stellar Development Foundation? If you don't know, and you've somehow stumbled into this stream anyway: the SDF works with the Stellar community, which is all of you, to build the Stellar network. Stellar is a fast and reliable L1 blockchain with finality in

[05:00] just five seconds. The network's been live since about 2015,
It has seen sustained real-world usage of 150 transactions a second with literally no problem, just cruising along, which is pretty crazy to think about. It has a global network of what we call anchors, which are on- and off-ramps issuing on-chain versions of assets from the real world, including cash access, which we'll talk about later. And for the last year, we've been building something new: Soroban. So what is Soroban? Soroban is a new add-on to the Stellar network. Traditionally, the vanilla Stellar network didn't have smart contracts. There were a few L2 attempts, like Turrets, which some of you might know, but they had some issues, like atomicity and other things like that. So we wanted to open up more decentralized innovation on the network, and to open up new opportunities to the underserved and unbanked populations that Stellar is built to serve.

[06:00] And, I mean, my big thing is that payments are such a small part of the financial world. It's an important thing to get right, but it's just the base layer of what we can build here. So we announced the project that would come to be known as Soroban back in January of last year. Since then we've shared eight, I think, iterative releases (yeah, we're on Preview 8 now) onto our experimental test network, known as futurenet, so devs can experiment, learn, and contribute to the design discussions. We've been regularly running a program called Sorobanathon (we'll hear more about that later from Tyler), and we regularly discuss design choices and onboard new devs to the community on our Discord. The community interactions with all of you lovely folks who are now in the Stellar community (sorry, I don't make the rules, but I will enforce them, so you are part of the community now), the community interactions and the public input have been really exciting to see. That's been my favorite part of this journey. It's been absolutely mind-boggling:

[07:00] it's impacted almost everything about Soroban, from naming, to authentication and how that works, to how Soroban should integrate with the existing vanilla Stellar network. And while Soroban isn't tightly coupled to Stellar specifically, it is designed to work well with the network. A bunch of points on that: it's written end to end in Rust for speed, efficiency, and access to the ecosystem that exists there. It comes with a bunch of plug-and-play SDKs for simple to complicated authorization models. And it's built to scale, because it has to keep up with the rest of the existing network; it can't slow the network down. But the thing I'm probably most excited about in Soroban is how it's empowering all of you in the community, now that you can build on Stellar completely unchained. Hey, I'm sorry, if I didn't make that joke Tyler said he'd fire me, so it had to happen.

[08:00] At this point I'd like to introduce Morgan. He's the founder of, a community member who's been building some incredible stuff to help onboard other devs to Soroban, and he's going to show us a bit of what he's been working on. Hello everyone. I'm going to start by sharing my screen; I should have done this earlier. All right. So here's a thing that every developer starts their programming journey with.
We all like to say hello, and we all like to say hello to the world as the very first thing we do in our programs. Almost everyone starts their journey that way, and in fact, the time it takes someone to say hello world in a specific programming environment may be the determining factor in whether they're

[09:00] going to continue their journey on that platform or not. So I definitely want you to stick around for the rest of the talk, so I'll just get started with the hello world part. All right, what do we have here? As Paul mentioned, I am the founder of, which is the tool you see right here. It is a developer playground for Soroban smart contracts. Now, what do we see on screen exactly? Well, I'm logged in, so I have access to the gated alpha here, and I have the ability to create a new project. Let's go ahead and do that. A new project pops up, and let's delete everything, because we're going to start from scratch. What's this environment all about? Well,

[10:00] first we have a code editor on the left. This is where we'll put our Rust/Soroban code. The playground runs on Rust and the Rust Soroban SDK; as you continue your journey you'll find there are other SDKs for Soroban available, but for now we're going to deal with Rust because that's the reference implementation. So we have the code editor where we put our Rust and Soroban code, and then we have some widgets. Right now the console is open and the code is open, so let's get started with the hello world part. How do we do that in Rust? Well, I guess we write a hello function, call the println! macro, and say hello. Okay, compile it. And that worked. It's a Rust program, so it compiled as it should,

[11:00] and the playground abstracts away all the details of creating Cargo projects, defining targets, and so forth. You just write some code, click compile, and you're off to the races. Morgan, I'm going to interrupt you for just one second: can you zoom in a little more, because we can't see it? A little more. How about now? A lot more. Yeah, a little more. It will be unusable if I zoom in any further, so instead we can hide my face; there's a lot of stuff on screen that's not the code, so we can just focus on the code. There we go. Okay, sweet, we got one confirmation: "I can see it fine, carry on." And everyone can hide the faces, because the vertical space might be useful:

[12:00] nothing but the screen share. All right, perfect, thank you. All right, carrying on. So println!("Hello") didn't work. Well, at least it compiled; it just didn't produce what we wanted. Why is the question. Well, we're working in a Soroban environment, right? Soroban expects a certain format for these things to work, and everything we want to be able to call has to live in a contract. Contracts are structs in Rust, so we're going to declare a contract struct. The name can be anything you want; I'll call it Contract because I couldn't come up with a better name. All right, then we define an implementation for Contract, and let's copy our hello function into it.

[13:00] Okay, all right, compile this again. And that worked again, no compiler errors. But the problem is we still don't see any hellos popping up. So that's not great.
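For reference, the contract at this point in the demo looks roughly like the sketch below, reconstructed from the video, so treat the exact names as approximate. It compiles, but `println!` targets a standard output that a Soroban contract doesn't have, which is why nothing shows up:

```rust
// First attempt from the demo: a plain struct with a plain Rust
// function. This compiles, but prints nothing when invoked in the
// Soroban environment, because contracts have no stdout.
pub struct Contract;

impl Contract {
    pub fn hello() {
        println!("Hello");
    }
}
```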
Ideally, you want to work with the SDK here, because a struct like this on its own is not really Soroban; we've got to tell it to do a little bit more. For that we're going to use the Soroban SDK, and we're going to import the contractimpl macro. That's going to do a bunch of things, but essentially it converts this implementation right here into a Soroban contract. And okay, we have a few errors over here. By the way, Rust is super famous for really good compiler errors, and in this

[14:00] environment, which is enabled by the way Soroban is designed to leverage WebAssembly and Rust, we get those beautiful Rust compiler errors too. We can see there are some problems with the panic implementation, although it's not entirely clear what's going on. So I'll skip the mystery here and solve this problem by adding this attribute right here. Contracts on the blockchain have to be super efficient in how they use the available ledger space, and Rust programs, even though they're pretty light, still carry a bunch of stuff we don't really need in our contracts. So, to eliminate everything we don't need and keep just the basic Rust that's required for

[15:00] contract logic, we declare #![no_std]. This unfortunately removes our ability to use the println! macro, so let's get rid of that. Now we're back to a compiling contract, and let's declare this function as a public function. Now we have something on screen up here that looks like a button. This actions widget, the middle third of the interface, is where you can click and interact with every function your contract declares. Our contract right here declares a single hello function. We can call it, and we'll see a return value in the console. A function without an explicit return value implicitly returns nothing in

[16:00] Rust, and the same applies here; the console shows it as none. Now I guess we can leverage this return dynamic to finally say hello to everyone tuning in, and for that we'll need a special type. Since we can't use the Rust String (it's unavailable under no_std), we have to find a different way to carry our information, and for that we're going to use a Symbol, which is an efficient data type in Soroban for storing alphanumeric characters: essentially all the letters of the alphabet, plus the digits and the underscore. What we're going to do with Symbol is initialize a short symbol that says hello. Short just means this sequence of characters can fit in a single 64-bit value, which is pretty

[17:00] important in this context. Let's recompile, and oh, thank you, Rust compiler: we are returning a value, so let's make sure we declare what we're returning. Let's call this. All right, we have a return value, hello, from this function right here. And this is your most basic implementation of a Soroban contract that says hello to anyone tuning in.
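Assembled, the finished hello contract from the demo looks roughly like this. It's a sketch assuming the preview-era SDK shown in the video, where `Symbol::short` builds a short symbol and `#[contractimpl]` alone is enough; newer SDK versions also require a `#[contract]` attribute on the struct and provide a `symbol_short!` macro instead:

```rust
#![no_std]
use soroban_sdk::{contractimpl, Symbol};

pub struct Contract;

#[contractimpl]
impl Contract {
    // Returns a short symbol: a handful of alphanumeric/underscore
    // characters packed into a single 64-bit value.
    pub fn hello() -> Symbol {
        Symbol::short("hello")
    }
}
```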
Now let's make it a little more spicy and use a bit more of the environment. Let's say we're going to log a value; everybody loves logging. As someone coming in from a web developer background, I use console.log all the time, and console.log is the best.

[18:00] We have an equivalent in Soroban: it's called the log! macro. We're going to declare a new function called hello2, and what are we going to do here? We're going to say hello to a specific name. It's going to be a Symbol, and we're not going to return anything. Okay, so how do we interact with this log macro? Well, it's a macro, so we use a bang, and the first argument is the environment within which we're logging this value. You'll get used to this concept: every function in your contract that interacts with anything in the outside world has to take an environment as its very first argument. It's kind of like your access to everything. So log! takes the environment as its first argument, and we're just

[19:00] going to pass in a reference to the environment. Next up we have a string, "Hello", and then we provide the name. Let's compile this. Oh, we're missing Env in this import. A handy feature: if you're having problems with a specific type, or you're missing something, you can click the links right inside the compiler widget that take you to the documentation, so it's an easy feedback loop. Now that we've imported Env, we can recompile and try saying hello to Twitch, which I guess is where most of you are. And in the console widget we can see that between hello2, the function we're calling, and the return value, we have a log that says

[20:00] Hello, Twitch.
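The hello2 function from this part of the demo might look like the following sketch, under the same preview-era SDK assumptions as above:

```rust
#![no_std]
use soroban_sdk::{contractimpl, log, Env, Symbol};

pub struct Contract;

#[contractimpl]
impl Contract {
    // log! takes the environment first, then a format string, then
    // the values to interpolate. Logs show up in test and playground
    // consoles rather than on chain.
    pub fn hello2(env: Env, name: Symbol) {
        log!(&env, "Hello {}", name);
    }
}
```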
All right, I'm almost done with the first demo. Anyone and everyone tuning in can actually test this on your phone, so this is going to be a multi-modal demo. I just popped up this share modal, and I'm going to take my phone (this is an actual phone I have here), bring up the camera, and look at the QR code. Tap on, and we're taken to a version of that's mobile-optimized. We've got to work on mobile experiences just as much as desktop experiences, and as someone designing these apps, we've got to be mindful of where our audience is. So this kind of interface will let users test this contract anywhere

[21:00] they are, especially on a mobile phone. We have access to the same widgets: the console is open, and actions is a sheet down at the bottom. We can call hello like we did before, with quick access at the bottom to previously called functions, and we can call hello2; let's do Twitch, confirm. Then we can call the function again with the same arguments, or change the arguments and do YouTube. Okay, all right, that concludes the demo. Congratulations to everyone who scanned the QR code and interacted with the app. Right now this is still a gated alpha; if you want access to the current build, please tweet at me at @sorobandev on Twitter and I'll make that happen.

[22:00] All right, I'm handing it back to you, Paul. There we go. Okay, I'm back. Thanks, Morgan. Can we get the slides up again? Cool, thank you. So Morgan has shown us the basics of Soroban, but I want to go into what actually makes it so special and so exciting. First off, it is batteries included for devs. That's our goal when we're designing it, and that means we're including everything from storage, to authentication and authorization frameworks, to token minting and transfers. All of that is baked in, ready to go, so you can focus on your

[23:00] application and not spend ages trying to figure out how to verify an ed25519 signature; you shouldn't have to worry about that. Secondly, unlike pretty much any other smart contracting platform out there, because we're launching on the Stellar network, you get access to all the existing assets on the vanilla Stellar network. And I want to address a comment in chat, I think from Matthias, that said I should call it classic and not vanilla. I'm calling it vanilla for a very specific reason: the existing Stellar network is not going anywhere. It's all staying there, it's not being deprecated. It's not a "classic" thing; it's just a different flavor of the same thing. It will still be the fastest, cheapest way to use the network, but if you need a bit more power, that's what Soroban is for. Building on the existing Stellar network means that from day one of launch, your smart contracts can hold and use USDC and many other assets that are

[24:00] already on the network. They also have access to Stellar's cash on- and off-ramps via our partnership with MoneyGram, which works in over 200 countries worldwide. Putting this together, think of someone, let's say Julia. She doesn't have a bank account. She can go to her local MoneyGram location and deposit cash into the kiosk. She receives USDC on the Stellar network in her wallet, and now, with Soroban, she can take that USDC and deposit it into, well, a lot of things (whatever you're building), but let's say an income-generating DeFi protocol where she receives a portion of transaction fees for providing liquidity. The partnership between Stellar's anchor network and MoneyGram allows users like Julia to move cash straight from their physical, real wallet to a DeFi protocol and vice versa, which is something I don't think has ever been seen on any other blockchain network so far. And the third point on what makes Soroban super special

[25:00] is the scaling optimizations we're doing. When designing Soroban, we've been able to learn from the state of the art and hand-pick the best solutions from each. We've designed the transaction execution model to scale across multiple cores. We've calibrated the fee model to maximize throughput and minimize cost while protecting existing Stellar network traffic. And we've designed a lot of optimizations around the contracts themselves that you don't see on every other chain. For example, look at NEAR. Not to pick on them, I love NEAR, but they do this: in NEAR contracts you need to do your own serialization and deserialization of arguments as they move in and out, which means you need to compile that library into your contract and deploy it as well. In Soroban, we've pushed as much as we can out to the host environment (batteries included, right?), which lets us massively reduce the contract size. You don't need to pay to deploy that code, and your

[26:00] users don't need to pay to run it. We can take that a little further with built-in contracts.
We have an idea in Soroban of built-in contracts, similar to the precompiles you might know of on other networks. Currently this is just for the standard asset contract, and it means that contract runs ultra fast and cheap, and it lets contracts standardize on an interface within the ecosystem. The standard asset contract we have in Soroban is how Stellar assets are exposed in Soroban: they look just like any other contract to your contract, so it's really easy to integrate with existing Stellar assets. The last thing here that's really cool: we're actually working on solving state bloat with our state expiration model. On other chains, you store some data on chain and it's there forever. Well, "forever" in air quotes. We have a rent-based system with

[27:00] evictions and restorations to ensure the chain's working data set stays as lean as possible. Once your rent expires, your data gets evicted, and you can pay to have it restored back on chain if you need it. We can do a deep dive on that another day; it's a bit involved, and actually we're still designing some of those elements right now, literally right now, on the call on Discord. So at this point I think Morgan can introduce how stateful contracts work on Soroban and cover some of the basics. And Paul's back too. Hey, Paul.

[28:00] Yeah, I'm back as well. We're waiting for my screen share. There you go. Okay, all right. Thank you, Paul. So, like Paul mentioned, there's quite a bit of innovation going on in Soroban, and the deeper you go into it, the more you understand. Like he mentioned, there's a design discussion going on right now that's about to wrap up; they happen every week, I think. But anyhow, let's discuss the Soroban state of mind, specifically when it comes to storage and preserving things in the ledger. As I open this up, I want to hide my face and focus on the code again.

[29:00] All right, perfect. So we're back to a brand new project. I think we should rename this, since we're working on state: the Soroban state. Okay. So, sooner or later, every developer finds themselves in a situation where their application is not working as expected. Raise your hand if that's happened to you; I can't see you, so I'll assume everyone has had this problem. And we all know the techniques for solving these things: unit testing, logging intermediate values, using breakpoints to inspect stack frames, and so forth. But unfortunately, smart contract developers often don't have a rapid workflow for finding bugs

[30:00] or faults in their contracts, which leaves them with limited options when they want to test something out. You take the current code for your contract, and you have one of two options. You deploy it to the test net of the blockchain you're working on and test it against a wallet, which, if you're very efficient, takes under a minute, or it could take longer. Or, second option, if the blockchain is well equipped for developers, you can probably deploy to a locally running Docker instance. That's all fine and dandy, but there are limits, and folks tend not to test as much as they're used to because of all the hurdles. Soroban takes this challenge head-on by providing

[31:00] a test environment right in the SDK.
You'll see in the documentation, if you follow along that path, that you read a contract and immediately below it a unit test for the function, using your regular Rust cargo unit tests. With I leverage that same functionality: an environment providing features to rapidly test your contracts. So right here we're going to explore state, because, let's face it, if we're writing hello world, bugs probably won't affect anything too much; if we're dealing with state, bugs suddenly become an issue. All right, let's begin. How do we store something on the ledger? We're going to start with the same

[32:00] #![no_std], so we're not importing the standard library. We're going to use the critical elements like contractimpl and Env, and let's grab Symbol as well from the SDK. We're going to declare another Contract struct, and then we're going to implement this contract with a couple of functions. Let's clean this up, add the contractimpl, all right. So what do we want to test to showcase this? Well, first, let's compile often, right? It's never too often to compile. We're going to have some data in storage on the ledger, and we're going to save and retrieve that data. So

[33:00] we're going to have a couple of functions. Let's declare save as the one that puts data into the ledger; it's going to take a number, a u32. Save is going to return the same number, just for the sake of it, I guess. And we're going to have load, which takes the environment and returns that number. So it's essentially save and load, two functions for this contract. Let's compile this again, make sure nothing breaks. Oh, okay, well, I guess I compiled a little too early. Let's return some value here. Okay, so we have

[34:00] something of an issue. Let me remove this and make sure it compiles again. Let me back out of this. Oh, we don't need to back out; we're just not returning a value yet. I should have trusted the Rust compiler errors. All right, so we call save. It takes in a number, and, by the way, this is one of those nice abstractions you get with an environment like say a function takes some arguments, in this case a number argument that's a u32; well, we get a nice modal where we can input our number, confirm, and that calls our function. As far as integrating into Soroban goes,

[35:00] there's another nice feature that's part of the Soroban contract specification: when you compile the contract, some amount of the compiled bytecode is allocated for the spec. So let's document save: "Save a number to the ledger." Okay, compile this again, and we can see the modal now incorporates our comment for that function, describing what exactly this number, or this function, does. We'll click confirm, and let's add documentation for the load function too: "Load a number from the ledger." Compile this again, and, oh, it doesn't take any arguments,

[36:00] so it's not going to display that modal. That's fine. All right, let's return a number as our first exercise: it's going to be a u32, and 42. We got 42 in the console here. Right, so now the magic part: how do we take the number we provided in our modal and put it into storage?
Well, we need a certain key, something to store it under, and for that we're going to declare a constant. It's going to be a Symbol value; that's why I imported Symbol. I'm not original with names under these circumstances, so I'm just going to use STORAGE. And over here we're going to take the same

[37:00] environment we used before for logging, and we're going to reference storage. Okay, compile this. Ah, there's the error I was expecting: storage is a method, not a field, as we can see in this error message. We can inspect what exactly storage is in the documentation and see how to use it. We can see there's a set function under storage; it takes a key and a value. So, with this new knowledge: since this is a method, not a field, we call it, and since we observed it has a public set function, we call set to store what we're storing. We provide STORAGE as the key,

[38:00] and we provide number, the thing we're storing, as the value. So: save 42, confirm. All right, how do we know we stored this correctly? Well, that's why we have this load function. How do we retrieve something? We used set to store it; how do we get it back? We can go back to the documentation real quick and see there's a get function associated with storage, and it returns an Option wrapping a Result that potentially has the value, so you'll see there's some unwrapping that needs to happen. We reference that same storage, call the get method, and ask for the value under STORAGE, the key. So what do we get? We get

[39:00] an error, because we're trying to return a u32, so let's declare that we return a u32. Now we get another error saying we want a u32 but we got an Option. Whenever we get a value from storage in a Soroban contract, we don't know whether the value is there, and we don't know whether that value is of the type we're going to return, in this case a u32; it could be a Symbol, we just don't know. So we can unwrap the Option and see if that works. Then there's the question of the Result: at this point we've unwrapped the Option, so we're sure there's a value there, but we're not sure it's a u32. We could do clever unwrapping here, but I'm just going to do a silly unwrap, so it panics if it's not a u32. Let's

[40:00] compile this again. Let's go ahead and save 111, and load. Okay: we just put a number onto the ledger under a specific key, and we retrieved that number from the ledger. Let's experiment with 420: save, load the value, do something else. It works.
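The save/load contract at this stage might look roughly like the sketch below, again assuming the preview-era storage API used in the video, where `env.storage().set`/`get` operate on one storage map and `get` returns an `Option` wrapping a `Result`. (The current SDK instead splits storage into instance, persistent, and temporary, and its `get` returns a plain `Option`.)

```rust
#![no_std]
use soroban_sdk::{contractimpl, Env, Symbol};

// The single ledger key the number is stored under.
const STORAGE: Symbol = Symbol::short("STORAGE");

pub struct Contract;

#[contractimpl]
impl Contract {
    /// Save a number to the ledger.
    pub fn save(env: Env, number: u32) -> u32 {
        env.storage().set(&STORAGE, &number);
        number
    }

    /// Load a number from the ledger.
    pub fn load(env: Env) -> u32 {
        // The entry may be missing (the Option), and the stored value
        // may not convert to a u32 (the Result); the demo unwraps
        // both, so either case panics (traps) at runtime.
        env.storage().get(&STORAGE).unwrap().unwrap()
    }
}
```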
Okay, so there's a complication I want to implement here, and it's a little different from what we did during Paris Blockchain Week. This is something Tomer told me to do, so I'm just going to see if it works; and if it doesn't, hey, this is a demo, these things break. Here's the gist: I want to store a number for a specific account. So

[41:00] I don't want to store just a single number; I want to store as many numbers as I have accounts. We're going to modify this save function a little. We're going to take in a parameter called user, of type Address. Address refers to any valid address on the Stellar blockchain. Then we take in a number, return the same value, and let's just see what this does. If I call save, I'm greeted with a second field here; this modal expands to however many arguments your function takes. And there's another cool abstraction here: when dealing with accounts on a blockchain, it can be really tricky for someone just getting started. How do I know how

[42:00] to generate one? Do I need the private or the secret key here? That's a lot for someone coming from, say, JavaScript. For that reason, completely abstracts away interactions with accounts by using regular names. How many users does the system have? It has three users: Alex, Bart, and Cali. You can select any one of them, and in the background that's turned into a specific address within the current environment, which is running in your browser. All right, so we select Alex. Nothing happened, really; we have a parameter we're calling the function with, but we're not using it yet. So let's try to use it. Under storage, instead of putting everything into the same key, let's

[43:00] use user as the key. Okay, let's compile this thing (it compiled) and let's call it with Alex, and let's do ten. Okay, so that sort of worked. But how do we debug this? We could do a log here, although maybe we should just use the load function. When loading, previously we had only one key; now we have any number of keys, one per user. So instead of using STORAGE as the key, we're going to use user. Here's the experiment: I think we saved; let's save 10 under Alex again, and let's

[44:00] load Alex. We got a 10, so that worked. Let's try Bart: it trapped. Okay, the trap means the value doesn't exist, but we explicitly told this thing to unwrap without caring about the underlying value. We can test this out: here's the log! macro we used before; call it with an arrow, just to have some string value in there, and let's see what that returns. We probably need to do some matching here

[45:00] on the type. I think I'm running a little over the description here, but the point is that we can store values under whatever key we want, and the key can be anything. In this case, I've shown it can be a specific address. So we'll just go over each user: Alex is going to have 11, Bart is going to have 22, and Cali is going to have 33.

[46:00] And let's check each one. Alex: 11. Okay. Cali: 33, and Bart is 22. This is much more elegant than what we had during Paris Blockchain Week, where we were storing values under specific usernames. Thank you, Tomer, for this suggestion and the nice deviation from the standard script. All right, that concludes my second demo, the Soroban state of mind. If anyone wants to experiment with this, please tweet at me on Twitter at @sorobandev, and I'll make sure you have access.
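Keyed by address, the final contract from the demo might look like this sketch (same preview-era assumptions as before; an `Address`, like any value convertible to a ledger key, can serve as the storage key):

```rust
#![no_std]
use soroban_sdk::{contractimpl, Address, Env};

pub struct Contract;

#[contractimpl]
impl Contract {
    /// Save a number to the ledger, keyed by the given user's address.
    pub fn save(env: Env, user: Address, number: u32) -> u32 {
        env.storage().set(&user, &number);
        number
    }

    /// Load the number stored for the given user. Traps if nothing
    /// has been saved under that address yet, hence the double unwrap.
    pub fn load(env: Env, user: Address) -> u32 {
        env.storage().get(&user).unwrap().unwrap()
    }
}
```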
Thank you so much, Morgan. Yeah, let me bring Tyler onto the stream. Howdy, Tyler, how are you? Hello! All right, let's bring up your screen here. And, nice, tell us all about the hundred

[47:00] million dollars. Two opportunities? Sounds very interesting, so I'll let you take it away. Yeah, they don't let me talk about much, but they do let me talk about the money. So we've got basically two main opportunities for engaging with Soroban right now. We're in this phase between being on futurenet, moving to test net, and eventually being on mainnet later this year. While we're in this phase of developing Soroban and really trying to understand how to build it, this malleable stage of solving difficult problems and trying to build the best blockchain smart contract value-transfer protocol in the world, there are some unique opportunities, and I'm going to talk about two inside this $100 million Adoption Fund that the SDF has put out. One is the one I'm responsible for, and that's Fast, Cheap and Out of Control. It's primarily an

[48:00] educational content generation engine where you can learn Rust, smart contract development, and how those two things come together under the Soroban umbrella. Right now it's a game that takes you through Soroban contract development with the objective of building fast contracts, cheap contracts, and out-of-control contracts. Over the next few months (we're dropping a little bit of alpha today) we're going to be migrating a lot more attention to Fast, Cheap and Out of Control for Soroban development. Stellar Quest is going to be for vanilla (or, if you're on team classic, classic) Stellar; that's where that education is going to live, and Soroban-related material will all live on Fast, Cheap and Out of Control. So make sure to keep an eye out for that. And the second one is the Stellar Community Fund. So we've got Fast, Cheap and Out of

[49:00] Control: fca00c is your shorthand for that, and I have all the domains for this. And then the second one: for folks who are developing protocols or startups, or wanting to migrate existing DeFi protocols over to the Soroban ecosystem, the Stellar Community Fund is the place for you. Anke and the team have really been optimizing the Community Fund for both vanilla (classic) Stellar applications as well as Soroban applications, so there's a really nice, clean process for getting into the Stellar Community Fund and getting funding for the different protocols or experiments you might have. So those are the two big pieces I'd call out: again, Fast, Cheap and Out of Control for those who are new to

[50:00] Rust or smart contract development, even as the Soroban ecosystem evolves and grows. A big gap I have felt as I've explored the competitive landscape is not just teaching people how to write Rust, or how Wasm works, or educating people on DeFi protocols or AMMs, but really trying to identify where vulnerabilities are coming from. How do we write good smart contracts? Where do standards come from, like where OpenZeppelin came from in the Ethereum ecosystem? Trying to ensure that the contracts we're building are good; building out games and community events for tackling specific difficult problems, so we're able to arrive at good documentation and standards for writing good smart contracts. Developing a smart contract is very different from developing a standard application, and some of those differences
I don't think we've focused enough on solving

[51:00] or teaching for; that's especially what FCA00C is aiming to solve. And then the Community Fund is for funding folks who are actually doing application, startup, entrepreneurial-type work. So make sure to check those out, and I will hand it back to Morgan, I believe. All right, sweet. I see here in the chat: we should definitely do weekly informal developer workshops. I think that would be awesome. And I think Matthias is slowly working on something he's holding secret until it's done. Spill the beans, Matthias! Do you want me to share a screen here, Morgan? I think we're good. I don't have a slide to go with this; it's just going to be my Soroban story, I guess, and a little background as well, so no slides needed.

[52:00] And, by the way, very nice shout-out in the chat, and free access for viewers if you tweet at me on Twitter. All right, so, my developer story. Going back real far, it started as a kid with a fascination with computers. I remember I was 14 when I wrote my first line of code, and for me that experience informed what I was going to do from that point onward. I've made a career out of it: almost 20 years of frustration, followed by the joy of seeing something you build come to life. That's any developer's journey, I guess. And the reason I'm sharing the frustration-and-joy part is that I feel this industry, and I'm talking

[53:00] specifically about the blockchain industry, has not focused enough on the joy of building while providing ample frustration. You can probably see that in your own experience. Over the last two years I tried playing around with a few smart contract platforms, spending a considerable amount of time with two of them, and in the end I felt a little like Dorothy from The Wizard of Oz in conversation with the Scarecrow and the Tin Man, trying to find out why one had no brain and the other had no heart. In my frustration, I kept looking for a platform that had both a heart and a brain, and just last November I found Soroban. The first Sorobanathon was taking place, and it was a beautiful experience. On GitHub, you log in, and everybody is

[54:00] submitting their own ideas and thoughts and experiencing the joy of development and technology. So I took part in that, jumped right on it, and found out about Soroban in the process. I found out about XDR, which is the data format for storing things on Stellar that Soroban uses. Then I started getting into Rust, and it's a long journey doing that without prior experience, mind you. And lastly there was WebAssembly, one of the key building blocks of Soroban. My first projects were very simple, mostly to do with optimizing the development workflow, one workflow or another, and I made some submissions along those lines; I think it was five in total. Yeah, there

[55:00] were a lot of them. Actually, you were one of our superstar submitters to Sorobanathon, if I recall. Yeah, and it was a blast, and the community on Discord was great; people were commenting and so forth. So that was a really soft and nice introduction to Soroban.
And my final project as part of Sorobanathon (I think I submitted it on the last day) was a browser tool for interacting with the Soroban RPC server, something a lot of folks do when interacting with any kind of blockchain: you don't go directly to a node you're running, you go to an RPC server. For y'all, that's a remote procedure call server; got to spell it out. So, the RPC server thing: I wanted to build a great

[56:00] developer experience for anyone building and testing their contracts, because deploying a contract and invoking it is often tricky if you want to do it with arguments and such, especially if you're coming at it from the command line. In doing that I spent countless hours working on various parts of the stack, from the Wasm executable to the host environment. And then it hit me. I realized something: I had everything I needed to build a self-contained browser environment for executing and testing Soroban contracts. It blew my mind; I had everything I needed to build the perfect playground experience. Seeing how coupling WebAssembly, the virtual machine, together with the Soroban host environment specification enabled

[57:00] that browser-based developer tool I had in mind, and many others you can picture, was my aha moment. I started seeing how I could build tools within the browser that required no setup, no Docker. I'm no fan of these massive setups that take 10 minutes if you're efficient. No, nothing: just log into a website and start experiencing Soroban. I immediately started building it, debugging, and engaging the community further. This was, I think, in February, when some of the programs Tyler mentioned were running (SCF), and I applied with,

[58:00] because at that point it had been brewing for about two months, and I got in. With the help of the Community Fund, I was able to continue building this. I call it the developer playground of my dreams, because it's everything I ever wanted from a smart contract playground. And all of this happened, from scratch to launching a community-funded project, in less than four months; yeah, like October... not even, I think it was late November, and then there was a gap in December. It's like light speed. So my experience so far has been Stellar, and I really appreciate the community for letting me go through what

[59:00] I went through. So, thumbs up. And yeah, that's my Soroban story. That was so wholesome and well put, Morgan; thank you for sharing that, seriously. I think there's a question here about when can we make token contracts on Very soon, I will open it up. Sorry, go ahead. Yeah, I'll open it up next week. There's a massive update I've been working on ever since getting back from Paris; stay tuned, I'll keep you all updated. Yeah, Morgan's all in on this, y'all, just to clarify: he has an awesome office now, he's set up an entire space and whatnot, it's a whole thing. is the next big thing here in the Soroban ecosystem; you heard it here first. I see Matthias is saying "I'll blow your mind 2x, Morgan": Soroban plus test runner plus cooperative coding in that dev environment, a.k.a. VS Code Live

[01:00:00] Share.
There are a lot of minds ready to be blown in the next few months, I think. There's so much innovation happening in this space. Absolutely, I'm so excited. Thank you for everything you do for the Soroban ecosystem, Morgan. So, yeah, I'm going to bring Paul back up here. Hey, everybody. Right, I'm just going to wrap up quickly. I hope you'll all join the community and we can blow minds together. Smart contracts bring a ton of incredible new use cases, power, and innovation to Stellar. For me personally, it's a hugely exciting new piece of tech; what can I say, I'm an engineer. I could say that Soroban will allow us to bridge the disconnect between developers and people on the fringes of financial inclusion, or that there are lots of chances to learn, tinker, build, and earn rewards with the Adoption Fund programs, like Morgan's doing.

[01:01:00] But what I will say is that I hope the experimentation we're all doing will lead to building real businesses, real projects, and real-world use cases on Soroban, to help further financial inclusion. If that gets you going the same way it does me, then my job's done; cool, I can go eat dinner. But seriously, if you're excited about this, you can start experimenting with Soroban right now. It's live, running on our experimental test network called futurenet, and I want to encourage you all to consider how and where you might fit into what's coming later this year, when Soroban launches for real onto Stellar's mainnet. And if I were you, yeah, start coding. Check out Tell your programmer friends to poke around and see if it's the right time to take a look at Stellar. There's a QR code on the screen that'll take you to `soroban.stellar.org`; that is the place to go to learn more. There are links to everything you need there: the docs, the Discord. Join the Discord, keep up with developments; all the devs building Soroban every

[01:02:00] day are on there answering your questions. Thank you so much for your time. I can't wait to see what you'll build. Back to you! Thank you so much, Paul. I do want to re-emphasize that we are building Soroban out there in the streets, as I like to think of it, and not in some laboratory. Everything is out in the open. Join the developer Discords, be active. As Paul said best: everyone watching this stream, or whoever ever comes across it, you're already part of the Stellar ecosystem and community here. So come on over, start building and tinkering, and let's make this a vibrant, thriving community of developers. So yeah, thank you all for joining us on the stream. Stay tuned, and we'll see you in the Discord. Take care. Bye, everyone.

+
diff --git a/meetings/2023-04-20.mdx b/meetings/2023-04-20.mdx new file mode 100644 index 0000000000..6d4a0834d5 --- /dev/null +++ b/meetings/2023-04-20.mdx @@ -0,0 +1,174 @@ +--- +title: "Archival Fees and Temporary Storage" +description: "Design discussion on Soroban state expiration economics, examining rent burning, archival incentives, and temporary storage pricing, with a focus on minimizing complexity while keeping ledger and archive growth under control." +authors: + - garand-tyson + - graydon-hoare + - nicolas-barry + - paul-bellamy + - siddharth-suresh + - tomer-weller + - tsachi-herman +tags: [soroban, CAP-46] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This discussion continues the exploration of Soroban’s state expiration and rent model, focusing on what happens when entries expire, are deleted, or move into archival storage. The group revisits earlier decisions around burning outstanding rent balances and evaluates whether additional incentives are needed to encourage deletion of unused ledger entries. + +A major theme is simplicity versus precision: whether to introduce a new “archival fee” to motivate cleanup, or instead rely on pricing differentials—especially making temporary entries meaningfully cheaper—to guide developer behavior while avoiding extra fee types and conceptual overhead. + +### Key Topics + +- Burning outstanding rent balances on deletion: + - Prevents refund-based attack vectors where malicious contracts reclaim rent paid by many users + - Avoids unfair redistribution where one actor benefits from rent funded by others +- Archival storage characteristics: + - Archived entries must be stored indefinitely, even if later restored + - Deletion is strictly better for entries that will never be used again +- Archival fee proposal: + - Flat, refundable-at-deletion fee paid on entry creation + - Burned if the entry reaches the archive + - Intended to incentivize deletion before archival + - Downsides: new fee type, added complexity, and larger entry size +- Temporary vs. restorable entries: + - Temporary entries have fixed lifetimes and never enter the archive + - Restorable entries persist, accrue rent, and may be archived +- Pricing and incentives: + - Current cost gap between temporary and restorable entries is relatively small + - Proposal to widen the gap by discounting temporary entries instead of adding archival fees + - Discounted temporary entries encourage correct usage without extra mechanisms +- Developer ergonomics and complexity concerns: + - Reluctance to add another fee type purely for edge-case deletion incentives + - Preference for fewer storage types and clearer mental models +- TTL (time-to-live) behavior for temporary entries: + - Current model enforces strict expiration for security-sensitive use cases + - Debate over allowing TTL extension (“bumping”) versus keeping lifetimes immutable + - Concern that extensible TTLs could undermine security assumptions or allow rent gaming +- Consensus direction: + - Favor simpler pricing adjustments (temporary entry discounts) + - Avoid archival fee refunds and additional protocol complexity + - Preserve strict TTL semantics for temporary entries as the common, safe default + +### Resources + +- [CAP-0046: Soroban system overview](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046.md) + +
+ Video Transcript

[00:00] Hey everyone, I think we can get started. So this is the Soroban design discussion, in which we discuss protocol changes related to Soroban. Our topic today is state expiration: we're going to keep talking about rent and temporary storage ledger entries. So, Garand, can you take it away? Yeah. So, at the tail end of last week's meeting we were discussing what happens when entries are deleted with an outstanding rent balance, and we decided it was probably not a good idea to refund the outstanding balance, and instead to have the balance be burned. The reason is that if you allow rent balance refunds, you open up a lot of malicious attack vectors. For instance, because entries automatically get a rent bump whenever they're touched, you could have a smart contract where every

[01:00] invocation of a function touches one admin entry, for instance. That admin entry, especially for a popular contract, could accumulate a lot of rent balance because it's touched on every function call. An admin of the contract could then maliciously delete and recreate that entry periodically, essentially stealing the rent balance from the callers of the function. This would also be difficult for users to detect, or even to see whether the attack vector exists, because for something like an admin entry it would look like a normal operation for an admin of a contract to create or modify that entry. So it would be difficult to detect these sorts of attacks. Also, refunding the rent balance doesn't really make sense, because many different users may have paid into the rent balance while only a single user benefits from the refund. For these reasons, it seems the best policy when an entry is

[02:00] deleted with an outstanding rent balance is just for that rent balance to be burned. Now, even though we're burning the outstanding rent balance, we still want there to be an incentive to delete entries before they're archived. Even though we have state expiration to reduce the size of the ledger, when a restorable entry (that's what we're now calling entries that are eligible for archival, i.e., non-temporary entries) runs out of rent, even though it's deleted from the ledger, it still has to be stored perpetually in the archive. So if you have an entry that's no longer going to be used, it's significantly more advantageous for that entry to be deleted instead of being sent to the archive. Even though the archive is our deep storage (it's much slower, with much more room for large amounts of state), it would still be better not to have useless entries, entries that will never be used again, taking up space in the ledger or

[03:00] the archive. And since we can't incentivize this behavior via a rent balance refund, we're thinking instead of adding a base fee called the archival fee. Essentially, this would be a flat fee, probably tied to the size of the ledger entry, that you have to pay in order to create the entry initially. Then, if you delete the entry before it gets archived, you receive this fee back as a refund; and if you don't delete the entry and it gets into the archive, the archival fee is burned.
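In code form, the lifecycle being proposed here might look like the sketch below. This is illustrative Rust only; every name is made up for the example, and none of it is protocol code:

```rust
/// Flat fee, tied to entry size, paid when a restorable entry is created.
fn archival_fee(entry_size_bytes: u64, fee_per_byte: u64) -> u64 {
    entry_size_bytes * fee_per_byte
}

enum EntryOutcome {
    /// The entry was deleted before ever reaching the archive.
    DeletedBeforeArchival,
    /// The entry ran out of rent and was evicted into the archive.
    Archived,
}

/// What comes back to the entry's creator at the end of its life. Any
/// outstanding *rent* balance is burned in both cases, per the first
/// part of the discussion; only the archival fee is refundable.
fn settle_archival_fee(fee: u64, outcome: EntryOutcome) -> u64 {
    match outcome {
        // Refunded: deleting before archival is rewarded.
        EntryOutcome::DeletedBeforeArchival => fee,
        // Burned: letting the entry reach the archive forfeits the fee.
        EntryOutcome::Archived => 0,
    }
}

fn main() {
    let fee = archival_fee(100, 10);
    assert_eq!(settle_archival_fee(fee, EntryOutcome::DeletedBeforeArchival), fee);
    assert_eq!(settle_archival_fee(fee, EntryOutcome::Archived), 0);
}
```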
So the thinking is that this gives users an incentive to delete entries before they reach the archive. And because it's a fixed rate, and not something that can grow to large amounts like a rent balance, it can't be gamed nearly as much. One of the drawbacks is that this does make entries more expensive, and it also increases the size of each entry, because we'll have to

[04:00] store the archival fee as a parameter; it increases the size of all entries by about four bytes. But I think this provides a good incentive and can help us maintain the hygiene of the archive. So, general thoughts on both the issue of burning outstanding rent balance and on this new archival base fee? I think, on the rent balance getting burned, we're pretty much in agreement, unless someone else has a different opinion. I am very much concerned about introducing yet another type of fee for the purpose of motivating people to delete their entries, especially because we already have temporary storage, which hopefully should take care of most of these cases. Can you give us

[05:00] a use case for when a non-temporary entry might get deleted, and how often that happens? Yeah, so it depends on usage patterns. One thing with temporary entries is that their lifetime is fixed to a set amount, a very limited time frame. That's built in for security reasons, but also for fee-based reasons: temporary entries will have a maximum lifetime, probably on the order of months, like three months or six months; the exact value hasn't been decided yet. So I think there's a use case for, say, something like a payment channel that doesn't need to be preserved, but that you don't want limited to the maximum lifetime of a temporary entry. For entry types like that, which aren't super important from a security standpoint but have a variable-length

[06:00] lifetime, there might be some advantage to deletion. I think the second issue is that if we have this archival fee, it provides more incentive to use temporary entries as well, because right now the savings you get from a temporary entry are relatively minimal. Essentially, how we calculate the temporary entry fee is: say your temporary entry lasts 128 ledgers; we take a snapshot of the current rent fee and multiply it by 128, and that's how much the temporary entry costs. So today a temporary entry and a restorable entry are very similar in price, and I could see the network incentives being "well, they're essentially the same, so to be on the safe side I'll use a restorable entry" when a temporary entry really would do. That's the second advantage of having a base fee: by adding this fee to the restorable entry types and not to the

[07:00] temporary entry types, you also widen the gap between the cost of a temporary entry and the cost of a restorable entry, to further incentivize use of temporary entries over the restorable entry type. Have you considered any other options for widening the gap between temporary entries and restorable entries that don't include an archival fee? Oh, so there is an implicit savings, because of the rent fee snapshot.
Because the rent fee is tied to the bucket list size (generally speaking, at least for the access patterns we're seeing on Stellar Classic now; this might vary, because we haven't seen Soroban traffic in the real world yet), the bucket list grows relatively linearly. And because temporary entries lock in the fee, and we expect the bucket list size, and therefore the rent fees, to increase linearly, you are getting a cheaper rate because

[08:00] you're able to snapshot. That being said, especially for short-lived entries, the savings are pretty minimal, because the rent fee doesn't grow at a very fast rate. I'm trying to think whether there are any other implicit fee savings at the moment, but I think if we want to incentivize temporary storage from a fee perspective, we will have to add some additional cost to the restorable entry counterparts, whether that's a refundable fee that's returned on deletion, or a fee at creation that's immediately burned. I think either could work, but we do need some sort of price discrepancy in addition to the implicit discrepancy from the rent fee snapshot. Alternatively, if we think we don't need to incentivize temporary entries from a fee perspective, and the utility they provide is good enough incentive on its own, we could also

[09:00] go that route; but that could leave more archivable, restorable entry types floating around the network than we want, and make our archives larger than they really should be. Okay, yeah, the question is how much bigger they'll get. Basically what you're saying is that there's a whole category of ledger entries that sit between temporary and restorable, that could theoretically not be archived, and we're going to introduce another fee mechanism specifically for them. And we already have, across

[10:00] the board, a non-trivial set of fee mechanisms, so introducing another one is something I'm not entirely comfortable with. Yeah. Yes, another potential saving is that temporary entries never require proofs for creation, whereas some types of restorable entries require proofs, which is another difference that could be useful. For instance, the unique storage type requires a proof of non-existence to create, while temporary entries never require proofs; that being said, the recreatable storage type does not require such proofs either. I think temporary entries are a powerful primitive on their own, and I'm not sure how much we need to incentivize their use from a fee perspective. So I'm curious, for the people who know the smart contract

[11:00] ecosystem better than me: are temporary entries strong enough on their own, and do we not need this additional archival incentive? Yeah, so I think the main use case we're seeing for temporary storage is oracles. We've talked with some of the major oracle providers, who would love to see this, and I don't think that even if they could get archivable storage without a lot more in fees, they'd be interested in it, because, again, their data is really short-lived, relevant for only a short time. The other thing is that
if we add this archival fee, people + +[12:00] will actually need to reclaim it, right? So we're basically adding an operational burden on these entities to actually reclaim their fees. But this doesn't replace temporary entries, right? For the oracle use case you'd still want temporary entries, of course; this doesn't replace that. I guess the question is: does deleting a restorable entry happen often enough to justify the complexity of an extra fee and the additional four-byte archival fee value? Right now I'm hearing no; it's probably not a use case that will happen very often. I guess my opinion + +[13:00] should be clear. Is there anyone else with thoughts on the matter? What is the cost of letting this go to the archives — of having an archival entry go to the archive instead of being deleted? The thing about the archive is that it never shrinks. If an entry gets archived, you have to store it in the archive, versus if the entry gets deleted you don't have to store it at all. In addition, even once an entry is restored from the archive, we still have to keep essentially a stub of its existence in the archive. What happens is that whenever an entry is restored, its leaf node holding the ledger entry is nulled out, but the path to the node still needs to be maintained for all eternity. And so given that the archive is pretty much append-only, it is designed as a slow data structure + +[14:00] — the archive nodes are essentially just SSD-based and meant for storing large amounts of data. So it's not nearly as sensitive as the ledger size. But it is something we should keep in mind: if we send something to the archive, at least some of that data, even if it's restored later, will have to live forever. Which is why, if there's an entry the user will never use again, I would much rather that entry be deleted than have to store it perpetually in an archive, even though archives are kind of meant to store things perpetually. Based on what you said — I think you mentioned that the fee difference between temporary storage and non-temporary storage is negligible, but I would expect it to be somewhat significant, just based on what you said. What do you mean? I mean it would make sense to make temporary storage cheaper, to not incentivize what you're describing — just using + +[15:00] recreatable storage. Well, that's actually another thing we could do. Instead of adding an additional fee to archival storage, we could say: right now the temporary storage fee calculation is just the number of ledgers to live times the current rent fee. What we could do is make it the number of ledgers to live times the current rent fee times, say, 0.8 or some temp-fee multiplier, such that temporary entries are still based on the current size of the bucket list — so they can still increase or decrease in price as the ledger state grows or shrinks — but are strictly cheaper than archivable entries by a significant margin. That also simplifies the fee story, because we're not adding an additional fee type; we already have separate fees for temporary entries.
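A sketch of the discounted variant just proposed — the same snapshot calculation with a flat multiplier applied. The 0.8 figure is the speaker's placeholder, not a decided constant:

```rust
/// Illustrative only: discounted temporary-entry fee, with the placeholder
/// multiplier expressed in percent to keep the math in integers.
const TEMP_FEE_MULTIPLIER_PERCENT: u64 = 80; // "0.8 or some temp fee multiplier"

fn discounted_temporary_entry_fee(ttl_ledgers: u64, rent_fee_per_ledger: u64) -> u64 {
    // Still tracks the bucket-list-dependent rent rate, but is strictly
    // cheaper than the equivalent restorable entry.
    ttl_ledgers * rent_fee_per_ledger * TEMP_FEE_MULTIPLIER_PERCENT / 100
}
```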
This would just make them always cheaper. That would actually probably be a much simpler and better idea than having this refundable archival fee + +[16:00] thing. Yeah, I guess this doesn't incentivize the actual deletion of those entries, but I think you'd see fewer of them. Yeah, I think if temporary entries are significantly, or at least substantially, cheaper, then we don't really need to incentivize deletion. Because if you think about what the use cases for restorable entries are, especially for the recreatable type, it's pretty much just balances, and I think the number of balance deletions is probably pretty small. So I don't think it's worth a special case. Yeah, I completely agree. I think if we figure out a way to discount the temporary entries the way you described, that will go a long way towards making people use them. And in terms of the previous point on saving space for archivers, I hear you; the question there is + +[17:00] how much makes it worth it. If we save them ten percent of space, is that worth the added complexity? Obviously, if we can reduce the amount of space archivers require by an order of magnitude, that is a huge win, but I don't think we'll get there. I think there's a pretty narrow use case for these deletable restorable entries. So definitely my vote is for the discount approach for temporary storage. Yeah, I'm leaning towards the discount approach too. Also, just on our order of priorities, we should prioritize keeping the ledger small and not worry as much about keeping the archive small, especially if it's something like a 10% decrease, which wouldn't be that big of a deal. So I'm also leaning towards the discount-temporary-entries approach. Are there any last thoughts before I + +[18:00] move on? I guess the conclusion was: discount temporary entries, and have no refunds for archival storage. What I'm thinking is, if we go that route, where we basically say the archives are kind of a dumping ground for all abandoned ledger entries, what we maybe have to take into account — and I think it's probably okay — is, imagine the nodes that are going to be used to back up those high-value ledger entries. They will probably end up doing some level of filtering; for example, they might only keep around ledger entries for things that look like balances, or that are related to balances, + +[19:00] as opposed to random stuff. But I think even in that situation they have to keep the full tree around, right, or the hashes. Yeah. So how often do we reset the tree — is it on epochs? The tree is currently on yearly epochs; it's every year. So then the assumption would be that in a year we don't accumulate too much of this cruft, which might be okay. I think for those entries + +[20:00] — you were talking about it as a discount on the pricing of the temporary entries; I think it's the other way around, it's more like you actually add. So there's probably a constant term,
and then an additional rate on top of the price, to get to the restorable entry fee the very first time you create it. I think there's probably a constant factor. I did want to bring Ian from the crowd up to speak, because he has some interesting perspective and is very passionate about this. Sorry I was so passionate — I've written papers on this topic for other blockchains as a paid service, + +[21:00] so I've done a wide breadth of study as well as deep dives into different trade-offs that were made and how they influenced those designs. But those chains are slow to move and slow to change. There are a couple of different concepts here. The first one I want to cover is dust: the idea that the value of a record may be less than the cost of maintaining it, and that those records should probably be cleaned up automatically. It's a problem on Ethereum because you can never quite spend out a whole wallet — you always have a little bit left over, and that little bit left over is referred to as dust. But that's only one example of dust; there are many. The second concept is that there's a huge ecosystem growing around smart contracts, each of those ecosystem players has different data needs, and + +[22:00] there's no one solution that fits everybody. Even monolithic chains are dealing with storage problems, and any modular chain has an extreme storage problem. The storage problem needs to be addressed in a way such that you don't have nodes needing to install four different protocols, four different systems, in order to access the data that's being referenced — sometimes it might be 10 or 15 different storage protocols just to access what was in a roll-up, what was intended by the data that was submitted to the blockchain. Hold on one second: in the context of the current state expiration proposal by Garand, what are you trying to say? Hmm. So, you need to have different tiers of pricing; a rent model is insufficient, and + +[23:00] so are a couple of the other "well, we see this problem, now what are we going to do" approaches. There's not one solution that fits. It's really important to give a lot of flexibility to the people deploying on the network, and to give them as many pricing options up front, and they will simply do whatever is most efficient to save money up front. If you try to add an incentive later on, there are a few problems: one is that your adoption is going to be very low, and the second is that you're not going to have what they're necessarily looking for. If they can just pay again to extend the lifetime of that storage, that's the best of both worlds. Ideally there will be external storage for large amounts of data — for example, blob data should not necessarily be stored on + +[24:00] chain but on a separate layer, and you can incentivize the creation of those systems by limiting the amount of on-chain storage while allowing pointers to exist. So your best long-term solution, when people start doing crazy things with blockchains — which they're starting to, but are struggling to because the storage costs are so high — is that flexibility wins the day. If you have pricing tiers, automatic expiration, optional renewal, and you're charging by the byte, by the day, or in some cases by the minute,
then you're able to meet most of those use cases. Go ahead, please. I believe this is pretty much what we're doing — I could be mistaken, but if I'm understanding correctly, your proposal is that you have different tiers that automatically expire, but with the option to + +[25:00] renew. I believe that's what we're doing, with the temporary entries automatically deleting and being the cheapest, whereas the restorable entry types must be periodically renewed, so to speak. Are you proposing something different from this? Well, I've heard different proposals, and I embrace what you're saying in terms of tiers. I would just suggest, number one, that there be a large number of tiers, at least four, and number two: some of the data will be archived with no ability to recall, or excluded from the archive with no ability to recall; some can be removed and not archived, but you pay an oracle to recall it; and some can be permanently archived. Now I'm confused: you say removed, but you can pay an oracle to + +[26:00] recall it — what do you mean by that? I believe that's essentially how our archival system works: the archive is off-chain, and then you pay to restore something from the archive. So I think our archive proposal is essentially the oracle system you're talking about. If it's an optional flag, they can pay for it if they think they need it, but your archive is still smaller. The reason you might want to do that is that some people specialize in storage and want to earn a fee for specializing in storage — and by specializing I mean over 20 terabytes, probably around a petabyte or so. Okay. So Ian, we can drop a link here to the actual proposal, and we'd love to hear feedback; I think the type of issues, or the type of + +[27:00] solutions, you're talking about are actually embedded there, but we'd love to hear if you think that's not the case. We're talking details now, but I believe the archival system is pretty similar to what you propose. You can definitely add some comments in the docs. So I guess, are we ready to move on to temporary entry issues? Okay. The second thing — and this is somewhat relevant to the previous conversation — is temporary entries. I guess we've concluded that temporary entries should be substantially cheaper than the restorable entry types, via some multiplier or something like that. Currently the interface for temporary entries is: you define a key, define a value, and define a TTL + +[28:00] (time to live) measured in ledgers, such that the entry is guaranteed to be deleted on the exact ledger you specify. Now, under the hood the entry is not actually removed on that ledger — it persists in the bucket list for some time, but it is inaccessible. One of the open questions we have is whether or not we should allow users to increase the TTL arbitrarily, and in the current proposal we do not allow this. There are two reasons: a security reason and a gamification reason. First, for the security reason: we suspect there is a strong use case for temporary entries when it comes to things like allowances or KYC, where
if you specify an allowance that should only last 10 ledgers, it's meant to last exactly 10 ledgers. By allowing contracts to define behavior that increases the TTL, you can have + +[29:00] foot-guns, where a buggy implementation might extend a temporary authorization that should only last a short amount of time; by allowing life-extension primitives you are potentially enabling bugs. From a security perspective, that's issue number one. Issue number two is about gamification. The whole issue with temporary entries is that we want them to be cheaper, but we don't want them to be a way to game the rent system. What I mean by that is: if continually extending the TTL of a temporary entry is cheaper than just paying rent, that kind of skews network dynamics. Now, I'm not exactly sure this is actually something we should avoid, because temporary entries are self-deleting, so maybe we should allow users to continually extend the life and essentially get cheaper rent than if + +[30:00] they created a restorable entry and paid rent balances on those ledgers. Perhaps this is something we want to allow, but at least for now it seems like a somewhat unfair usage of temporary entries, and we wouldn't want temporary entries to be used to continually extend their TTL such that they pay a significantly reduced amount of rent. So in the current rent system we do not allow modification of the TTL. Now, there is an implicit way to extend the life of an entry: you can always load the entry, delete it, and then recreate it with the exact same value. That's still allowed, and so via this delete-and-rewrite pattern you can effectively extend an entry with the same key and value if you really want to. But we provide no way to keep the same entry and just bump the TTL. So I'm wondering if there are any additional thoughts on this: should we allow arbitrary TTL extension or not? This is a slight side point, maybe, and + +[31:00] kind of a question. It seems like the difference between restorable entries and temporary entries is — well, your question, I guess, is should we allow rent bumps, or is it prepay-only for temporary entries — but it seems like the only real difference is that restorable ones go to the archive and temporary ones do not. Is that roughly true, or not quite? Yeah, correct, with a couple of other differences: right now temporary entries are prepaid up front, and so you get a better rate, because you're essentially paying for a large number of ledgers instead of one-offs. I guess I'm saying, could we just have one type of thing? There are basically two different properties: how much rent it has, which is how long it stays on chain, and whether or not you've paid for it to go to the archive after that point. If you pay more rent up front for either of them you get a discount, and then this whole "cheaper way to use it" distinction goes away. So I think we shouldn't define a + +[32:00] difference. Well, the primary difference — and I should have mentioned this — is that temporary entries have strict lifetimes, whereas right now restorable entries do not have strict lifetimes.
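The delete-and-rewrite workaround described above could be sketched roughly like this; the toy types stand in for the host storage interface and are not the actual API:

```rust
use std::collections::HashMap;

/// Illustrative only: a toy key/value store showing the implicit
/// delete-and-recreate extension permitted by the current proposal.
struct ToyStorage {
    entries: HashMap<String, (Vec<u8>, u64)>, // value + TTL in ledgers
}

impl ToyStorage {
    /// Load the entry, delete it, and recreate the same key/value with a
    /// fresh TTL -- the only "extension" the proposal allows.
    fn extend_via_recreate(&mut self, key: &str, new_ttl_ledgers: u64) {
        if let Some((value, _old_ttl)) = self.entries.remove(key) {
            self.entries.insert(key.to_string(), (value, new_ttl_ledgers));
        }
    }
}
```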
Because of the way the bucket list works, we have a variable per-ledger rent fee, and because it's variable, we don't know what the rent fee of a given ledger will be until that ledger occurs. That means that for the database to be up to date on the live rent values of every entry, we would have to iterate through the entire ledger and decrement the rent balance of every single entry, which is impossible from an efficiency standpoint. Because of this we charge rent retroactively, which means restorable entries live and are accessible longer than they should be. The exact number is that on the lowest-level bucket we have, right now, about 30 days of quote-unquote free rent, where a restorable entry is out of rent but + +[33:00] still accessible and live. Temporary entries have very definite timelines: they exist for, say, 128 ledgers — no more, no less — and I think that's a powerful primitive for security purposes, which is one of the reasons we want to distinguish between the two. Okay, but were you asking about whether we should allow rent bumps for temporary entries? Well, not necessarily rent bumps, but TTL extension. In the current proposal the rent system applies to restorable entries: they have a rent balance, which is some amount of XLM that gets deducted from, so their lifetime in ledgers is not exact. The quote-unquote bump for temporary entries isn't increasing a rent balance, because temporary entries do not have a rent balance; rather, it's increasing the TTL. So even if you allow changing of TTLs, it's not so much a rent bump, because you still have a very definitive timeline and a very definitive death + +[34:00] ledger, if you will. So I'll defer to you on that. Thank you. My bias is not to allow extending the TTL. It's just another piece of functionality that people need to wrap their head around, but it also doesn't sit that well with my mental model of what a temporary ledger entry is. When I think about a temporary ledger entry, I think about a piece of information that I'm broadcasting to the world for the next N ledgers, and that's it. So if I update that, what does that actually mean? What is the actual use case for updating these temporary ledger entries? + +[35:00] Yeah, I personally don't know what the use case is. I think one possible advantage of updating the entries is if you have a type that is trivially recreatable, so you don't care if it gets deleted, but you also want it to exist somewhat perpetually. I can imagine an efficient solution where, if the TTL is below say 100 ledgers or something, you extend it arbitrarily, and if for whatever reason you miss a TTL bump and it does get deleted, it's trivially recreated. In this case you wouldn't want an archivable type, because it's trivially recreatable and so wouldn't be worth restoring, but you also want to use it perpetually, so you wouldn't want a strict TTL. Now again, + +[36:00] what we could also do is just create a temporary entry set to the maximum TTL, and then if it doesn't exist, recreate it and again set it to the max TTL; that's also possible for arbitrarily recreatable data. But that's just a use case I could think of off the top of my head. I don't know if anyone else, perhaps someone more knowledgeable in the smart contract space, wants to share their thoughts.
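Circling back to the retroactive charging described at [32:00]–[33:00], a minimal sketch of lazy rent settlement, with all names illustrative and a single rate standing in for the variable per-ledger fee:

```rust
/// Illustrative only: rent owed since the last settlement is deducted when
/// the entry is next touched, rather than on every ledger close.
fn settle_rent(
    rent_balance: &mut u64,
    last_settled_ledger: &mut u64,
    current_ledger: u64,
    rent_fee_per_ledger: u64,
) {
    let ledgers_elapsed = current_ledger.saturating_sub(*last_settled_ledger);
    let owed = ledgers_elapsed * rent_fee_per_ledger;
    // The balance can hit zero while the entry remains briefly accessible:
    // the "free rent" window mentioned above.
    *rent_balance = rent_balance.saturating_sub(owed);
    *last_settled_ledger = current_ledger;
}
```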
Well, I guess, on the TTL thing: why are we trying to babysit the contract so much? Not allowing access to an entry past its TTL, and not allowing bumps for that reason, sounds like we are stepping over the boundary in terms of responsibility. If the contract doesn't want something to be valid beyond a + +[37:00] certain time, that should be implemented in the contract itself, I think. And if we do something like that, then for the bumping we're talking about, we just have to make sure that when an entry gets bumped, the actual expiration time will match the lifetime in the bucket list. So at creation time, that means we have to have a function that allows you to compute — if you want to go from a number of ledgers, you can do that off-chain, and actual time is the same thing. I guess where bumping gets a little bit complicated is: + +[38:00] you have some number, and then you try to say, okay, extend it by at least 30 days, because you won't be able to do exactly 30 days if we don't make the TTL strict, and you don't want people to pay to get free rent for however much longer it takes to actually expire that entry. But that seems workable; those are helper functions we can expose that basically allow you to reason about the bucket list. Well, I guess the interface would be a little weird though, because essentially what you could do is make temporary entries, but the TTL would have to be a power of four ledgers — so 4^0, 4^1, etc. — with a maximum lifetime of 30 + +[39:00] days. That just seems like a weird and not super user-friendly interface, to not have a strict bound on a TTL. From a contract developer's perspective, they don't have knowledge of the bucket list, so it just seems weird to have this temporary entry that can only expire on powers of four, for some reason, with poor granularity. And I think it's weird from a security standpoint: if you want something to live only 128 ledgers, you have this entry type called temporary entries that is self-deleting, but on top of the self-deleting temporary entry you still have to implement your own TTL if your use case doesn't work on an exact power-of-four boundary. It just seems like we're exposing too much of the underbelly of the system to the developers in that case. + +[40:00] So just to be clear: if I use a non-power-of-four TTL, the entry will still live past my TTL until the next power of four? Yeah. Because of the nature of the bucket list DB, we can only remove entries from the data structure on power-of-four ledgers. In the current implementation we have the TTL stored as a field inside the entry, and the entry itself does live on the bucket list beyond the TTL in most cases, unless the TTL happens to be a power of four. But we do a check whenever you try to access the entry, and if the TTL ledger has passed we just return null, as if it didn't exist, even though it does still exist on the ledger, and then we garbage collect on powers of four.
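A sketch of the access-time check and power-of-four eviction just described — simplified, since the real bucket-list mechanics are more involved:

```rust
/// Illustrative only: past its TTL the entry still physically exists in
/// the bucket list, but lookups treat it as absent.
fn is_accessible(ttl_ledger: u64, current_ledger: u64) -> bool {
    current_ledger < ttl_ledger
}

/// Illustrative only: physical eviction happens on bucket merges, modeled
/// here as the next power-of-four ledger boundary (4^0, 4^1, 4^2, ...).
fn next_eviction_boundary(current_ledger: u64) -> u64 {
    let mut boundary = 1;
    while boundary <= current_ledger {
        boundary *= 4;
    }
    boundary
}
```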
Okay, that makes a lot of sense. + +[41:00] So I'm a bit confused as to what Nico just said — what's the confusing bit here? I think Nico is suggesting not having the TTL field as part of the protocol: if users want a strict TTL they must implement it themselves, and the struct they store must carry the TTL value. Instead, we'd just expose an interface where temporary entries live and are accessible as long as they're on the bucket list, and when you specify a TTL you must specify a power-of-four TTL. Nico, am I understanding that correctly? Yeah, basically. I mean, you still have a TTL; it's more that right now we're in a special case: if you load an entry that is in the bucket list but has an expired TTL, you consider that it's not there, and + +[42:00] we would get rid of that special casing by just requiring that the TTL you have actually expires on a bucket boundary. Yeah, I agree with Garand; there's only so much we can expect from developers to understand the bucket list here. I think the basic expectation from temporary storage is telling the platform "this is how many ledgers I want this thing to live for", and putting restrictions on that — telling people you can only use powers of four, and if you want an exact TTL then you need to write your own smart contract — I think that's not the ideal product experience. Yeah. The reason I mentioned that was to allow for bumping, because if you + +[43:00] want to allow bumping, you have to take into account what is actually happening under the covers. I don't know if it's too complex, but I kind of think we're solving two different problems here. There is the case of a temporary entry that has a certain deadline and dies — such as KYC, or an allowance you want to exist for 10 ledgers and exactly 10 ledgers — that's case one. And there's a second case where you have an entry and you don't really care how long it lives, as long as it lives around this time, and you might want to bump it on every access. I feel like those are two different classes of storage, and a temporary entry with an exact TTL only solves one of them. So I'm wondering if it might be reasonable to have a flag, say on the recreatable storage, to say archive or not archive, + +[44:00] because I feel like that's a different use case than what the temporary entry with the exact TTL is getting at. But again, that might be too complicated, having three or four different storage types. Yeah, I would rather not add anything else here. I think the first case you described, the one where a contract wants to put up a ledger entry for exactly N ledgers, is the common case and is the thing we should cater for. If they want more sophisticated situations in which they can recreate or bump this entry once in a
while, then they can write a smart contract that achieves that functionality and can be updated at specific times, + +[45:00] but I think we need to keep the main contract for the common use case. Yeah, I think if you want to implement that behavior it's still possible: if the TTL is within X number of ledgers, just delete and recreate the same value with a maximum TTL. That's very possible, and especially if we have discounted temporary entries, the deletion-recreation path might be at cost or cheaper than a restorable entry. So I think that seems like a reasonable approach. I'd rather not expose bucket-list powers of four in any host function or SDK. Okay, it seems fine not to expose it, then. But at the same time, if we focus more on what that TTL actually is: + +[46:00] do we want it to be more of a "live at least for a given time", so that you can allow anybody to bump that ledger entry, or do you want it to be more like that strict thing you mentioned? Because this strict definition of TTLs seems, I don't know, a bit too special; it's unlike the other types of storage that we have. But I disagree here. I think the interface people are looking for is "I want this to live for this amount of time", and the ledger number here is just a good approximation for time. This idea of "I want this thing to live for at least this amount of time" — with the + +[47:00] actual length of how long it lives depending on the underlying implementation — that's not the case anymore, because the discussion we had right before was: hey, let's move the use cases where people don't want archival to those temporary entries. The TTL is not about having this strict thing; it's about "I don't want to pay rent, I just want it to be cheaper", and if I have to refresh the thing, it gets refreshed in some way, and the contract in that case is not necessarily the one responsible for refreshing the TTL — anybody that cares can, basically, in that context. Yeah, I don't know; in my mind it still feels like Tomer was talking about issue A + +[48:00] and Nico's tackling issue B — those seem like two inherently different use cases. It's not — what we're saying is that if people don't want this strict TTL, they are pushed to use the archivable things, the ones that end up in the archives, and therefore end up on some of the more expensive tiers. But what exists in the space between a strict TTL and archivable entries? Give me an example — what lives in that middle? No, the strict TTL is another layer on top. I mean, I'm fine with the strict TTL; it's more about what it actually means — what do we mean by strict TTL here, where it becomes a security feature. I'm saying, if we get rid of the security feature, basically allowing the + +[49:00] TTL to be extended, then we have something that to me actually seems to make sense. Because now, if you want to get into the archive, you use the recreatable thing and you just use rent for that; or you use those temporary things where, if you want a refresh, it's manual — you have to do that yourself — and you get a little bit of a discount,
because they don't go to the archive by default. And then, if you want this strict, super-constraining TTL that never changes, something you cannot bump, you have to implement that in your contract. Because again, in terms of use cases: for what type of entry do you need a TTL that is kind of frozen, that you can never change? Well, if you're an oracle and you + +[50:00] publish, say, hourly price feeds and you want them to live exactly one hour, then you put a TTL of a number of ledgers that approximates an hour. You mean you don't want that thing to stay on the ledger for longer? I mean, as a record — in that particular scenario you're going to overwrite that entry, right? Well, I think you're getting into the contract implementation. I'm publishing some data and I don't want to pay for it after that time. It's not just about "I want to pay for it for an hour"; it's about this thing being relevant for an hour. I don't actually want this thing to be alive after an hour — that's part of the payload; this expiration is meaning the contract puts there. + +[51:00] So you're saying there are no use cases where you don't want this strict thing? From the chat, a lot of people seem to disagree with that notion. So what we could actually do is just round up to the nearest power of four or something like that: allow users to specify arbitrary ledgers, and the entry may or may not last longer. Because I think, as a storage medium, I'm starting to agree more with Nico, in that I don't know if we need to specialize a security + +[52:00] feature in a storage interface; perhaps we should make the storage interface more general, I don't know. Well, what I'm saying is that I think it makes sense from the usability aspect: when you say the TTL is 10 ledgers, if you don't do anything, after 10 ledgers that item basically appears to be deleted, even though it's still in the bucket list. That's an implementation detail, and I think that's totally fine. At the same time, I think it should be possible to bump that TTL if you want to go beyond it, and when you bump it, that means you have to pay — it's not staying at the bottom of the bucket list; it's basically kind of a rewrite, in terms of how we're going to implement it. + +[53:00] And then, in terms of the security feature, you wouldn't be able to rely strictly on that layer for guaranteeing that something is going to live for exactly some amount of time; if you want that, you have to attach it to your data, basically. Okay, so Garand, it sounds like we're at a bit of an impasse right now. We probably need to regroup and look at the pros and cons. Can you summarize this discussion and the pros and cons of the different approaches we've discussed, potentially drop that on soroban-dev later today, and hopefully we can asynchronously regroup and think about this? + +[54:00] Am I still on? Garand, can you hear me? I mean, I can hear you; I don't know
if Garand can. Thanks, Paul. Maybe Garand is having some trouble connecting — he's typing something. No, we cannot hear Garand. Regardless, we are almost at time, so I think this is a good stopping point to regroup and think about these things. Moving forward, Garand will hopefully share this on soroban-dev and we can continue the discussion there. People can also feel free to stick around in the live chat and continue talking. Thank you all for joining, and see you all + +[55:00] next week
diff --git a/meetings/2023-04-27.mdx b/meetings/2023-04-27.mdx new file mode 100644 index 0000000000..3f5c6cd099 --- /dev/null +++ b/meetings/2023-04-27.mdx @@ -0,0 +1,192 @@ +--- +title: "Fees CAP and Archival Interface" +description: "Design discussion on unifying Soroban state-expiration storage: rent-based lifetimes for temporary and restorable entries, automatic and manual rent bumps, incentives to prefer self-deleting data, and how (or whether) to hash event outputs for replay integrity." +authors: + - dmytro-kozhevin + - garand-tyson + - graydon-hoare + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - tomer-weller + - tsachi-herman +tags: [soroban] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This discussion focused on evolving state expiration toward a single, rent-based storage interface that supports both temporary (self-deleting) and restorable (recoverable via a separate network role) entries. The group explored how to offer predictable minimum lifetimes without requiring exact ledger-specific TTLs that would harm downstream performance. + +The conversation also covered how rent is paid and extended (automatic bumps on access vs manual bump operations), how to incentivize use of temporary entries, and a final thread on whether emitted events/results should be hashed into ledger-visible structures to preserve historical replay fidelity. + +### Key Topics + +- Unifying temporary and restorable storage under one rent-based model + - Both entry types start with an initial lifetime selection (e.g., short/medium/long) expressed via SDK-friendly flags + - Exact, arbitrary ledger TTLs were discouraged due to performance and overfitting to narrow security use cases + - Contracts that truly need exact expiry should implement it explicitly (e.g., store an expiration ledger and enforce in contract logic) +- Minimum lifetime guarantees vs “fuzzy” expiration + - The system aims to guarantee a strict lower bound (entry lives at least as long as purchased) + - The upper bound can extend beyond the minimum depending on access patterns and rent mechanics +- Automatic rent bumps on access + - Proposal: every read/write access triggers a small, incremental rent bump (a small number of ledgers) + - Includes a ceiling/max so frequently accessed entries don’t accumulate effectively “infinite” lifetime +- Manual rent bumps + - Users can explicitly extend an entry’s lifetime via an operation that targets specific keys + - Deleting and recreating an entry burns the existing rent balance (no refund), discouraging “reset” patterns +- Differentiated pricing to incentivize self-deleting data + - Temporary entries should have a lower rent rate than restorable entries to steer developers toward self-deleting storage when possible + - Restorable storage is positioned as something to reserve for data that cannot be recreated (or is otherwise critical) +- Product/UX concerns about bumping temporary entries + - Tension: some temporary data (e.g., oracle-like short-lived values) becomes useless past a semantic expiry even if storage persists + - Potential mitigations discussed: + - contracts enforce semantic expiry (delete/ignore stale values) + - special-casing very short lifetimes to avoid automatic bumps + - General agreement that a unified interface simplifies developer decision-making (main question becomes “does this need to be restorable or not?”) +- “Witness node” concept for restorable data + - Restorable entries would be removed from validators when expired and stored by 
a separate role (described as a witness node / deep state store) + - Restoration involves bidding/fees, rewarding nodes that serve requested historical state +- Whether smart contracts should be able to trigger rent bumps + - Concerns raised: + - conditional bumps can damage parallelism by creating extra write-dependencies + - contract-driven bumps can enable griefing/bugs (e.g., unexpectedly forcing large rent payments) + - ambiguity about who should pay when the invoker isn’t the owner of the state being accessed + - An “auxiliary rent bump” idea was described: + - contract can mark which keys should be bumped (`aux_rent_bump(key)`) + - caller supplies the bump amount (including zero) in the transaction footprint + - viewed as niche/complex; leaning toward wallet-managed manual bump operations instead +- Event/result hashing and replay integrity + - Initial thought: hash event outputs into ledger-visible data for cryptographic inclusion proofs + - Shifted focus toward replay-fidelity checks: include a hash of emitted events/return values so historical replay mismatches are detectable + - Preferred direction discussed: keep full events in transaction meta for consumers, but include their hashes in operation results (avoiding larger structural changes) + +
Video Transcript + +[00:00] Good morning, everyone — or good afternoon or evening, wherever you are. This is the Soroban Design Discussion, in which we discuss topics related to the Stellar protocol and specifically Soroban, and today we're going to keep talking about state expiration. So without further ado, I'm just going to hand it over to Garand. Yeah, so last time we talked about temporary entries, and today I'd like to continue that conversation. The two biggest topics last time were these temporary entries — entries that have some sort of lifetime and are automatically deleted after that lifetime. Previously we were talking about whether this lifetime should be an arbitrary ledger, and whether we should allow users to define an arbitrary ledger for the TTL. The second issue was whether we should allow users to extend this TTL. Correct me if I'm wrong, but at least my takeaways from that conversation last week were + +[01:00] that extending the TTL was definitely a required feature, and that there seemed to be some confusion around the interface, where the temporary entries have this TTL-in-ledgers sort of interface whereas the restorable entries have this rent balance, rent fee sort of interface. So today I'd like to talk about addressing those two issues, and then reworking the temporary entry interface to hopefully combine the interfaces and have a unified front on storage. As far as TTLs specified in arbitrary ledgers, I think we decided to go against that. The use case for that was primarily security reasons — say you have a temporary allowance that you want to exist for only 10 ledgers, no more, no less; you'd need an exact TTL to accomplish that. However, from a protocol standpoint that's probably a bit of an over-specification toward one security use case, and having those specific TTLs definitely + +[02:00] degrades downstream system performance when it comes to things like Horizon and RPC. For that particular use case it's very easy to implement on the smart contracts themselves, just by including an expiration ledger in the struct you actually store in the entry. But a specific guaranteed TTL expiration ledger seems to be outside the scope of the storage protocol. With that in mind, it seemed from the community comments that temporary entries required a strict lower bound, but the upper bound didn't really matter that much. And because the lower bound — that is, the minimum lifetime — is what's more important here, what I've tried to do is unify the two interfaces and have both temporary entries and restorable entries share the same fee structure and the same rent balance structure. Essentially, with this interface, for both + +[03:00] temporary entries and restorable entries you must pay rent. The way that works from a user perspective is that whenever you create either a temporary entry or a restorable entry, you define the starting lifetime. What we'll do in the SDK is expose these as flags, so you could have short, medium, and long lifetimes: a short lifetime is very short, on the scale of minutes, less than an hour; a medium lifetime is on the scale of several days, less than a week; and a long lifetime is about 30 days. This gives flexibility for the temporary entries.
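As a sketch, the SDK-level flags just described might look something like this; the names are hypothetical and the durations are the rough figures from the conversation, not final values:

```rust
/// Illustrative only: creation-time lifetime flags as described above.
enum InitialLifetime {
    Short,  // on the scale of minutes, less than an hour
    Medium, // several days, less than a week
    Long,   // about 30 days
}
```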
If you have oracle data that's very short-lived, you can use the short lifetime; or if you have something longer-lived, where you don't care whether it's still needed and you don't care if it goes to the archive when it runs out of rent, then you can do the medium or long-term rent payments. Now, what this does is that the short, medium, and long terms give you a + +[04:00] strict lower bound, so we can offer the guarantee that if the short lifetime says it'll last 15 minutes, that entry will last at least 15 minutes, or the ledger equivalent. Because the rent fee is variable, it's possible that the entry will live exactly as long as we guarantee, or it could also live longer than that. So for temporary entries, if there's a security use case where you want expiry after some exact amount of time — not a cost-saving use case — then that needs to be implemented on the smart contract side. Now, in addition to this initial rent balance that you can specify via these flags, temporary entries are identical to restorable entries in terms of rent bumps and the rent payment interface. What that means is that temporary entries receive a small incremental rent bump on every access, which means that if a temporary entry is frequently accessed, it will live longer than its lower bound. To imagine the + +[05:00] most extreme example: you could create a temporary entry with the minimum rent balance of, say, 15 minutes' worth of rent, but if that entry is very frequently accessed in those 15 minutes, it could last for hours, days, possibly even months. This is one of the side effects of this rent balance feature, but based on community feedback it seems the lower bound was what mattered, and the upper bound less so. This also unifies the interfaces. Additionally, on top of the automatic rent bumps on temporary entries, you can also do manual rent bumps, the same way you can for restorable entries. So if a user is motivated to have a specific temporary entry last a long time, they can pay the rent fees and do that manually via an operation if they want. Now, as far as the cost structure: because temporary entries are self-deleting, we want to incentivize them, and there seemed to be issues with + +[06:00] an archival fee, or some sort of base fee for restorable entries. So I think with this unified interface, since both restorable and temporary entries are subject to rent, what makes the most sense is for temporary entries to be charged at a slightly lower rate. For instance — these aren't the real numbers — if a restorable entry would be charged one XLM per ledger, the exact same size entry as a temporary entry would only be charged 0.8 XLM per ledger, or something like that. In this way, because we essentially have a lower rent fee, we can incentivize using temporary entries and push as much traffic towards the temporary entries as possible. So, generally speaking, first thoughts on this unified interface and having this sort of rent balance feature built into the temporary entries? + +[07:00] Yeah, there's one thing that I find a bit weird about bumping temporary entries: I feel like the product story doesn't really add up in a lot of ways.
Let's say, for example, that an oracle is creating a price that's available for one hour; from their perspective it is in fact only relevant for one hour, but now consumers are actually going to bump that ledger entry to live beyond its intended lifetime. Yes, so this is one of the open questions I wanted to discuss. If you're going to do an oracle, for instance, where the information is only valid + +[08:00] for five minutes and useless beyond that, then temporary rent bumps don't make sense. But there are other use cases — I think someone gave an example of a zk-SNARK used to verify some value: if you're continually using that zk-SNARK, it does seem advantageous for that proof information to remain on-chain as long as you're using it. So I guess the question here is: do we want a universal policy where temporary entries are either always bumped or never bumped, or do we have some sort of conditional policy, like an additional flag on the temporary entry that says this entry is subject to automatic bumps or this entry is not? That provides some granularity, but it may also complicate the interface a little more. Yeah, I still didn't quite understand this example — maybe that's because I haven't attended the previous design discussion — but I really don't quite understand + +[09:00] why temp entries should be used for this use case in the first place. If you are interested in certain behavior, and if it is expensive to recreate this entry from scratch, then the question is why this isn't a restorable entry in the first place. And in general, the way you've been describing it, would it be a correct statement to say that with this proposal, the only difference between temp entries and recoverable entries is whether they go to the archive? It kind of seems a bit redundant to have two very similar things that differ in only one respect; maybe we should try to either consolidate them or have more well-defined use cases. The reason I was always advocating for no bumps on the temp entries is that the use cases for them are kind of different: + +[10:00] it's something that you want to persist for a short period of time, and you don't really care about it being archived or bumped; if you need more complex behavior, you probably are interested in it being archived and restorable. Yeah, I think I disagree, just because there are kind of two use cases we're targeting here. In the oracle case you have very short-term data, like five minutes — that's what we were targeting with the initial temporary entry proposal, with TTLs that never change. But beyond the oracle use case, I think the network should have a strong motivation to prioritize storage that's self-deleting, just because the archive is not a black hole that we can throw entries into, right? If every single entry type beyond the most basic oracle data needs to be a restorable + +[11:00] entry that gets archived — having entries thrown into the archive, as opposed to having them stored on the ledger, is still an issue, and will have issues of scale later on. And so I think,
if we can do anything to prioritize using data that doesn't have to be persistently stored forever and ever, we should definitely do that. So in this particular set of use cases, we still offer that very short lifetime expectation for things like oracles, with the 10-or-15-minute, very cheap, very inexpensive entry type, but we also provide flexibility for, say, an entry that is used longer-term but isn't sensitive enough to be put in the archive. I think the archive should only really be used for entry types that can't be recreated. I think what you said is: if an entry type is expensive to recreate, then why not use a restorable entry — and we shouldn't prioritize, or we + +[12:00] shouldn't incentivize, that behavior. I think if an entry type is recreatable at all, even if it's an expensive recreation, we should do everything we can to prioritize it in the temporary entry space. Having an entry that is arbitrarily recreatable get deleted and then recreated on the network is significantly healthier for network scale than having this entry type that is arbitrarily recreatable but, for some cost saving or whatever, gets thrown in the archive. I think the archive should really only be reserved for security-sensitive things, and for things like balances that can't be recreated, where you need a true definition of ownership. But what is your developer story for this? How do you communicate it? Because it kind of sounds nice in theory, but I'm not sure how this can be enforced. I don't think + +[13:00] it can be, right? And there's a second issue as well: you cannot make people create certain sorts of entries, and the thing is, if my entry is kind of important to me and I want to maybe recover it, I'll make it a recoverable entry anyway. Maybe I shouldn't, but it's not clear how we can communicate this clearly — our story there is already pretty complicated. And also, on the temp entries: the thing is that automatic bumps do not provide any guarantees as to how long the entry persists. If you have some proof or whatever, you cannot reasonably predict that, even if someone accesses it, it will prolong the lifetime of the entry enough for it to be useful in other contexts. And I'm + +[14:00] just not sure how you can put any expectations on the temp entry bumps, because the entry will be gone sooner or later, and it's very hard to argue that it will exist for any given period of time beyond the lower bound. So I'm just not sure how you would communicate it, and why users won't just use recoverable entries whenever they can. In the context of those oracles — oracles are going to refresh the actual entry, so I think it makes total sense to have this auto-bump feature, unless you have abandoned entries, like an oracle that is creating a thing that is only valid for five minutes, and then for the day, right, and that's it, and then you + +[15:00] have to catch it when you can. In that situation, this oracle contract should actually be written with the actual expiration baked into it, like the pattern that was in the doc at some point: you look at the expiration time, and if it's past the five minutes or whatever window you wanted, you actually delete the entry, and then you don't have those problems.
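That contract-side expiry pattern might be sketched like this; the types are hypothetical stand-ins, not an actual Soroban SDK example:

```rust
/// Illustrative only: the stored payload carries its own expiration, so the
/// contract enforces relevance regardless of the storage-level lifetime.
struct PricePoint {
    price: i128,
    expires_at_ledger: u32,
}

/// Returns the price only while it is semantically valid; a real contract
/// might also delete the stale entry here to avoid "bumping into oblivion".
fn read_price(point: &PricePoint, current_ledger: u32) -> Option<i128> {
    if current_ledger <= point.expires_at_ledger {
        Some(point.price)
    } else {
        None
    }
}
```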
Yeah, and to answer your earlier question about why someone would use temporary instead of restorable: it's just cheaper, right? Generally speaking — efficient market hypothesis — sophisticated smart contracts that are actually used will use the cheapest rent possible, or at least a good developer would. So I think just by having this financial incentive for temporary entries, we kind of solve the use case issue, and sure, if someone wants to say "I + +[16:00] don't want to worry about it, my data is important, I'll only use restorable entries", that's fine, but they have to pay for it. So I think having that financial incentive, and trying to de-incentivize restorable entries — because from a network health perspective we want to de-incentivize restorable entries as much as possible — this more general and unified temporary storage interface does that in a much better way than a separate TTL-based approach would. I'd like to comment on this from the perspective of developer experience. This is maybe less to do with incentivizing users to choose one over the other, or the implementation details of how easy it's going to be for consumers to access this data. But I think this unified interface, where the only difference between a temp and a recoverable storage type — if I'm understanding this correctly — + +[17:00] is whether it's put in the archive or not when it times out, is going to lead to a better developer experience and to developers making better decisions about which to use, because we're simplifying the decision process. By making that the only functional difference in what happens with the data, we're making the decision process so much simpler for them, because a developer just needs to answer the question: do I need this to go to the archive or not? They can answer that with a yes or no, make the decision, and move on. When these storage types have other differences, suddenly developers have this matrix of features and attributes that they need to decide between, and that can lead to more complex thinking and maybe making the wrong decision, because somebody thought they needed some feature + +[18:00] from a recoverable type but didn't really need it. So yeah, I think this is good. Yeah, what I'm envisioning as the end story is that we just have a unified storage class. The previous proposal had two separate storage classes, temporary and restorable, but this would have a single storage class, and when you create an entry there are parameters you specify: the initial lifetime — short, medium, or long — and then the type — the exact names may change, but something like temporary, recreatable, and unique. I think just having that single story is a significantly better experience.
But I think we can talk more to this. I think what we're doing is narrowing the decision tree that somebody needs to go through, + +[19:00] basically saying to them: you just need to decide what that type is, and it has a much narrower impact on what the result is. Leigh, were you arguing for or against? I'm arguing for it. I suspect maybe I'm misunderstanding something with your question, Tomer. Maybe I can just try to make sure I understand what you're saying, Garand. So the only difference that selecting that type makes... all of these types are going to support a TTL, is that right? Well, under the hood there's not a real TTL; essentially there's a rent balance + +[20:00] which is deducted from, and the way we have to do it that way is because the rent fee is variable. So the actual implementation is that you have a rent balance of XLM that a variable rent fee is deducted from, but the interface exposed is in ledgers, so essentially everything will have a TTL. Now, you can have guarantees at creation time. The reason why we picked 15 minutes, seven days, and 30 days as our creation lifetimes is that 30 days is the maximum amount that we can guarantee; past that there are no guarantees, because the rent fee can change. But those three initial values we can offer as guarantees, and I think 30 days is plenty of time: you create the entry, and then if you decide that you want it to last for, say, six years, you can do a manual bump. So I think those starting values are reasonable. + +[21:00] There's stuff in the chat if you want to talk about the oracle issue. Yeah, I'm still talking about what I said before. Let's be clear about this: the main reason we're working on temporary storage is that we've had internal discussions with some of the big oracle projects out there, and this is something they've expressed a desire for, and I think we're now trying to generalize it. But I think specifically the auto-bump doesn't really sit well with me for oracle data, because the oracle will probably limit the amount of time something is relevant for artificially, if they can't + +[22:00] do that within the storage parameters. And then effectively people are going to be bumping an entry into oblivion: I'm accessing at 4 PM a value that should be dead by 5 PM, and that's enforced by the contract, so people keep paying for it to theoretically live until six or seven PM, but at 5 PM the access is cut. So it's bumping to nowhere, and that doesn't seem to be what we want. So I guess there are two solutions I see to this. First off, the contract can just be smart about this and say: if you access this entry and it's past its expiration date, delete the entry, so you don't bump into oblivion. Also, from just a practicality standpoint, we do define a maximum rent balance for automatic bumps, to avoid issues like this where an entry is + +[23:00] used frequently initially and then never used again, so that it has an essentially infinite balance that never runs out. So there is a maximum; you can't bump it into oblivion. One possible solution is: for medium- and long-term lifetimes, that is, lifetimes measured in days and weeks, I think auto-bumps make sense.
But we could also just special-case the short-term lifetime, the one-hour lifetime, and say: hey, one-hour lifetimes are supposed to be super ephemeral, so this doesn't get automatic bumps, but the other types do. I think that's going to be fine from a developer standpoint, because we already have to special-case the short lifetime: because of the way the bucket list is structured, restorable entry types can't live short, they must live at least the medium lifetime, and so we already throw an error if you have a short lifetime with a restorable type (again, we have to figure out the SDK, but essentially restorable entry + +[24:00] types can't have a short lifetime). And so I think it would be reasonable to have an almost unified interface and still have temporary entries with medium- and long-term auto-bumps; it's just the very short term that doesn't auto-bump. Tomer, would this satisfy your oracle case? Maybe. I'm just concerned about having different exceptions and rules based on things like the lifetime of a ledger entry. Yeah, think about it some more. Well, because of the way the bucket lists are structured, it's very negative for performance if the protocol enforces precise boundaries. And so I + +[25:00] think, if we essentially build into the protocol that there are boundaries, but they're fuzzy boundaries, and the oracles can accept that, that's a significantly better compromise. It might not be exactly what the oracles were imagining, but if we can get close enough, unify the interface, and not disrupt downstream systems, I would be heavily in favor of that. For the fuzzy boundary you're describing, Garand: folks who don't want to rely on that feature of the system can still code logic into a contract such that a ledger entry stores a more granular TTL, if they really need that, right? Correct, yes. There's nothing stopping someone from building something stricter; we're just basically saying, if you can exploit this capability of the system, you don't have to write as much code, and your contract will probably be a little more efficient, etc. + +[26:00] Yeah, and the biggest thing is also fees, because this is way cheaper: a temporary entry with a short lifetime will be way cheaper than all the other storage types. And so from a financial perspective, even if the oracles have to do this check-the-TTL automatic deletion, it's still significantly cheaper than using any other storage type for this use case. Just to answer some questions in the chat regarding archival performance: the way we're structuring the archival process (and I know some folks have been asking about how we structure it) is that archiving is done very lazily, + +[27:00] on eviction. We only archive a very small number of entries every 30 minutes to one hour, and expiration and that kind of thing are done on bucket list merge boundaries. And so,
while this system, the implementation, is somewhat complex and not simple, it's been specifically designed to be low load on both validators and downstream systems. Also, one small clarification point; this is probably poor wording on my part, I apologize. We've been throwing around terms like archiver, but the type of archive we're talking about here (we should really rename this; it's left over from earlier state expiration terminology, and we still use kind of outdated terms) has nothing to do with the current history archives of the Stellar network. This will be a separate service. This is not the validator archive, I apologize; this is a new type of node. + +[28:00] Perhaps a better terminology would be deep state store. And so meta is emitted when entries expire: if it's a temporary entry, it's just dropped and deleted; if it's a restorable entry, then it's deleted from the bucket list, but the meta is emitted. Okay, 'witness node', thank you, Tomer. Excellent. The hardest part of computer science: naming stuff. And so whenever a restorable entry gets deleted from the bucket list, it's deleted from the validator and then sent to the witness node, and the witness stores it in a proof-friendly data structure. And then, essentially, whenever you need to restore something from a witness node, you just submit a bid on-chain with essentially how much you're willing to pay for the restoration, and then the witness nodes service that request, and whoever services it first gets the reward. So essentially the way this works is it's + +[29:00] optimized for downstream systems specifically, and it's not the archiver, it's not validators or archivers, but rather this new witness node type, and the witness node type has a financial incentive to run. It's structured kind of similarly to Bitcoin miners in a way, except it's not proof-of-work: they're actually doing useful work by storing and then producing these proofs, if that makes sense. My apologies for the overloaded terms. Okay, it does seem like there is consensus around the unified storage interface, and to some extent I think having a unified storage interface trumps + +[30:00] having super specialized behavior. So I'm kind of coming around to that, and I think, if no one else has any comments on the matter: if you could just formalize this and dump it in the Soroban dev channel later, that would be great. Does anyone else have any more comments on this specific topic? I will take the silence as a no. To answer your question: we have a CAP-like document; it was shared at the beginning of this chat. It's the rent proposal; 'Temporary Entry Expiration' is the title, + +[31:00] and right now it's outdated, because it's still the TTL version, but I will be updating it throughout the day with what we talked about, the unified interface. So the temporary storage proposal document is the one to watch, and it will be updated with these new results to formalize this. Okay, sounds good. I see that Dima has been trying to say something. Dima, you are muted right now. Now you are not muted and we still cannot hear you. Sounds like Discord trouble; he's typed the question, the same one. Yeah. So, the storage access interface: creating an entry versus modifying an entry. So, right
now we have just an overloaded set function, which both modifies and creates an entry. Under this proposal I would like to see that change, so that we have an + +[32:00] explicit create function and an explicit modification function that are separate. Essentially, what create would do is: you specify your key, your lifetime, and your type. Then, what are the expectations for rent bumps? Rent is bumped on every access, both read and write accesses; the rent bump is automatic and required, and it's universal. What we're going to do is pick some small number of ledgers, say 5 to 10 ledgers, and then for every entry that you read or write, you will be charged for bumping that entry five or ten ledgers. The thinking is that this value is very low, so transaction fees are still low; and the thinking may be that if a contract such as USDC, or something major like that, is used often, the rent is essentially abstracted away, because you have these automatic rent bumps and the small incremental bumps + +[33:00] add up. And so that's how it works: everything that you read or write will incur a small bump, and we've optimized at the systems level such that, even though you're doing a bump, read-only entries are still essentially read-only. You do have to do a small write because of the bump, but the entry you're writing is very small; we're trying to optimize as much as possible. Now, in addition to the automatic bumps, you can do an explicit bump via an operation. But one detail I should mention about the automatic rent bumps before I move on is that there's also a ceiling, such that you can't go above, say (I'm just throwing out a number here) one year's worth of rent. When I say one year's worth, this is an estimation based off the current rent fee; the rent fee is variable, so that estimation + +[34:00] may be an overestimation or an underestimation, but the rent fee doesn't change super fast, so the estimation is good enough for us. So essentially you have a ceiling, say one year or six months or something like that, such that if the rent balance is at or above that ceiling, then no background bumps happen and you're not charged the automatic bump fee while it's at the ceiling. Now, in addition to the small, required, automatic bumps, there's also an operation that allows you to put as much rent as you want into the entry, which is a manual bump. If you have a particular key that you're interested in beyond just the automatic bumps (an example of this would be a balance that you want to keep live on the ledger even if you don't use it very often), then you can call this operation and buy an arbitrary amount. You can put a ton of XLM there and have your entry exist almost indefinitely if you want to. That's essentially how the rent interface works with regard to + +[35:00] automatic versus manual rent bumps. There's a question in the chat. So first: yes, if you want to reset the rent, then you can delete and re-create, but the outstanding rent balance is burned currently. The reason for burning we talked about a little while ago, but essentially it can be gamed if you refund rent balance.
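A rough model of the access-time rent bump and ceiling described here, under the rent-balance design being discussed. All names and numbers (`AUTO_BUMP_LEDGERS`, the ceiling, the stroop amounts) are invented for illustration and do not reflect finalized protocol values.

```rust
// Hypothetical model of the per-access rent bump: every read or write tops
// the entry up by a small fixed number of ledgers at the current (variable)
// rent fee, unless the balance already sits at the ceiling.

const AUTO_BUMP_LEDGERS: u64 = 10;           // small per-access bump
const RENT_CEILING_LEDGERS: u64 = 6_307_200; // ~1 year of 5s ledgers, say

struct Entry {
    rent_balance_stroops: u64,
}

fn on_access(entry: &mut Entry, rent_fee_per_ledger: u64, tx_fee: &mut u64) {
    // Estimate how many ledgers the current balance buys at today's fee
    // (fee assumed nonzero here).
    let ledgers_funded = entry.rent_balance_stroops / rent_fee_per_ledger;
    if ledgers_funded >= RENT_CEILING_LEDGERS {
        return; // at the ceiling: no automatic bump, no extra charge
    }
    let bump_cost = AUTO_BUMP_LEDGERS * rent_fee_per_ledger;
    entry.rent_balance_stroops += bump_cost; // top up the entry...
    *tx_fee += bump_cost;                    // ...paid by the accessor
}

fn main() {
    let mut entry = Entry { rent_balance_stroops: 1_000 };
    let mut fee = 0;
    on_access(&mut entry, 2, &mut fee); // charged 10 ledgers at fee 2
    assert_eq!(entry.rent_balance_stroops, 1_020);
    assert_eq!(fee, 20);
}
```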
So if you have outstanding rent balance when an entry is deleted, you don't get it back; that's one disadvantage to this quote-unquote reset. Dima, to answer your question: I don't think there's ever a motivation for a rent reset. If you're going to delete and create the entry, just modify the entry with your new value and keep the rent balance that's + +[36:00] already there. So I don't really know what you're talking about when you say rent reset, but yes, it is possible; you just don't get your funds back. Sid, to answer your question: yes. Explicit rent bumps are not currently contract-defined, because that also opens the door for griefing and malicious uses, if a contract defines a super large rent bump, like an insane fee. Because of that, contracts do not define rent bumps; the only way to bump rent is via an operation where you specify the key, so contracts have no mechanism to bump rent. Now, there is a proposal called aux bump, or auxiliary rent bump, that would be exposed to smart contracts; if there are no other pressing issues we can talk about that. It's on topic today, but it's not super urgent + +[37:00] if we have bigger fish to fry. I know there's also been some discussion around transaction metas and results and whether or not their hashes get included; is that something that you want to talk about now? What's more urgent, this or Garand's topic? I think Garand's topic is more important than mine; I think my thing I can describe in like 30 seconds, and I don't think we'd discuss it much, so we can continue with this topic and spend maybe the last five minutes on the meta issue. Once Garand is done, we can talk about aux rent bumps. Yeah, I'll save the last five minutes for you; that's good. Yeah, so, to explain the issue: currently we have this operation that bumps rent explicitly, and this automatic, implicit rent bump, but there is no way + +[38:00] for a smart contract to define a rent bump action. The reason for this is twofold. First, we want to maintain parallelism. What you would expect, if a contract could define rent bumps, is an interface like: you have an entry, you check the current rent balance of that entry, and if the rent balance is below some value, then you bump it. Now, the issue with this is that it destroys our data dependency graph if you allow these conditional rent bumps (the conditional thing being: you check the current balance and only bump if it's below some threshold). The reasoning is this: because entries in both the read and read-write sets are receiving the automatic bump, you essentially create a dependency on literally every entry in every transaction that touches the same smart contract if you allow conditional bumps. Even if an entry is in the read-only set, + +[39:00] because of the automatic bumps, the rent aspect of that entry is implicitly turned into a read-write. And so conditional rent bumps destroy parallelism, and we don't want to allow that. There are some potential ways we could fix it, like doing rent charges in different stages of application, but it gets complicated really fast, and so conditional rent bumps, at least for version one, are out of the picture.
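The parallelism argument can be illustrated with a small sketch: the disallowed shape reads rent state and conditionally writes it, which makes every reader a potential writer; the allowed shape is a blind increment. The types here are hypothetical, purely to show the dependency difference.

```rust
// Why conditional rent bumps break the dependency graph: deciding whether
// to write based on the current rent balance makes every transaction that
// merely "reads" the entry also a potential writer, so reads can no longer
// be scheduled in parallel.

struct Entry { rent_balance: u64 }

// Disallowed shape: read-modify-write on rent state. Any transaction that
// only reads `e` now conflicts with every other transaction touching it.
fn conditional_bump(e: &mut Entry, threshold: u64, amount: u64) {
    if e.rent_balance < threshold { // read of rent state...
        e.rent_balance += amount;   // ...conditions a write: read-write dependency
    }
}

// Allowed shape: the bump neither reads nor branches on rent state, so it
// can be applied as a blind, commutative increment outside the data
// dependency graph (the same reason the automatic per-access bump is safe).
fn unconditional_bump(e: &mut Entry, amount: u64) {
    e.rent_balance += amount;
}

fn main() {
    let mut e = Entry { rent_balance: 5 };
    conditional_bump(&mut e, 10, 100);
    unconditional_bump(&mut e, 1);
    assert_eq!(e.rent_balance, 106);
}
```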
A possible second version is to allow rent bumps, but not conditional ones: we would not expose the rent balance to the contract, but we would allow a contract to define a rent bump. The issue with this is that it opens the door for either malicious griefing or just accidental griefing by bugs. For instance, it would be a poor user experience if a bug in a smart contract caused someone to pay 10 years' worth of rent on an entry that would be worthless + +[40:00] in two days, right; it just opens the door to a large class of bugs. It also allows the contract to define arbitrary fees, which I'm not sure is something we want to do. It's also not super useful if you can't conditionally check, so it doesn't seem like a great use case: it would probably be a bad idea for a contract to define behavior where, every time you view a balance, you bump that balance by six months or something, when you can't check the rent balance first, because the entry could already have a very high rent balance. And secondly, it's not very clear when you should do an explicit rent bump. For instance, say you have a token contract that has a view-balance function. It might seem on paper that an efficient implementation is to check the rent balance and say: okay, we want this huge balance to always have six months of rent, so on every call of view balance we check the rent balance of the balance entry, and + +[41:00] then, if it's below six months, we bump it. Now, that works well if the owner of that token balance is calling view balance. However, if a different user (say a user who wants to charge that user some amount) calls view balance to check how many coins that person has, and they get charged rent, that seems inappropriate. So it seems to be very difficult for contracts to define the situations where they actually should charge additional rent to the user, because it's not always clear whether the invoker is the owner of the entries being accessed, or has a vested interest in these entries remaining live on the ledger. Those are the reasons why, so far, we haven't defined rent bump host functions that are exposed to the smart contracts. That being said, there is one potential idea that we've been floating around, called auxiliary rent bumps, and there's a section that describes this in the second doc I shared, which is + +[42:00] the rent proposal, in the section on the aux rent bump host function. So what this is: essentially, you can define this function called aux rent bump, and it takes a single parameter, which is the key, and contracts can call this arbitrarily and bump arbitrary keys. Now, the trick is that the contract can't define how much to bump the rent by; all it can say is that this entry should be bumped, but not by how much. And then in the footprint of the transaction you would have an auxiliary rent bump value that the user can set to whatever they want. So, for instance, if I'm calling view balance on a token contract and it is in fact my own balance that I own, I can set my auxiliary rent bump to six months in the transaction, which means that any time the smart contract calls aux rent bump, that entry would be bumped by six months. But if I'm calling view balance on + +[43:00] another user's token balance I don't care about, then instead of setting aux rent bump to six months, I could set it to zero, and then every call to aux rent bump would be a no-op, because I don't want to pay additional rent.
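A hypothetical sketch of how the aux rent bump split could look, with the contract naming the keys and the caller's footprint naming the amount (possibly zero). Function and field names are assumptions for illustration, not the proposal's actual interface.

```rust
// The contract chooses *which* keys to bump; the caller's footprint
// chooses *how much* (zero makes every aux bump a no-op).

use std::collections::HashMap;

struct Footprint {
    aux_rent_bump_ledgers: u32, // set by the submitting user, may be 0
}

struct Host {
    footprint: Footprint,
    expirations: HashMap<&'static str, u32>, // key -> expiration ledger
}

impl Host {
    // The contract calls this for every key in a logical "group" (e.g. a
    // token balance entry plus its nonce entry); it never names an amount.
    fn aux_rent_bump(&mut self, key: &'static str) {
        let bump = self.footprint.aux_rent_bump_ledgers;
        if bump == 0 {
            return; // caller opted out: no-op, no rent charged
        }
        *self.expirations.entry(key).or_insert(0) += bump;
    }
}

fn main() {
    let mut host = Host {
        footprint: Footprint { aux_rent_bump_ledgers: 1_000 },
        expirations: HashMap::from([("balance/alice", 500), ("nonce/alice", 500)]),
    };
    // Contract bumps the whole group; caller paid for 1,000 extra ledgers each.
    host.aux_rent_bump("balance/alice");
    host.aux_rent_bump("nonce/alice");
    assert_eq!(host.expirations["balance/alice"], 1_500);
    assert_eq!(host.expirations["nonce/alice"], 1_500);
}
```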
The advantage of this is that, while there is a manual operation to bump rent, you need to specify the keys there, and that's a difficult issue, because the contract developers know what keys belong to a user, but the user knows their use case for how much they want to bump. You can imagine a smart contract might have three different types of data associated with a single user. For instance, a token contract might have both a balance tied to a user's account, which has one key, and then, say, a nonce with a different key that's also tied to the user. To be able to properly use my balance, I need both entries to be live. Now, if I'm not very sophisticated, or I haven't read the contract code in depth, + +[44:00] I may believe that I only need to bump my balance entry, and so I'll find my balance entry key and put six months on it; but if I don't bump my nonce value, then it doesn't matter, because the nonce will be sent to the archive and using my balance becomes impossible. So it seems reasonable for the contract to define, essentially, groups of keys that belong to a user, and this aux rent bump function allows contracts to do that: the contract would call aux rent bump both on my token balance entry and on my token nonce entry, and then, when I call that function, I only have to specify one value (aux rent bump: six months), and both entries are properly bumped. So this is an interface such that the contract developers define which groups of keys are bumped, and the caller defines how much those groups are bumped by. Now, the issue is that I'm not sure how useful this would be in practice, and it also seems + +[45:00] a little complicated. So I guess the question here is: do we want to allow smart contracts to define rent bumps at all? And if we do, if it is a strong requirement that they be able to define bumps, is this aux rent bump workable, or do we need to think about it a little bit more and figure out a simpler, easier solution? Do we actually need that to be in the protocol? I thought last time we discussed having a convention, basically letting people standardize on a method or something that gives you a bunch of hints like that, and then only incorporating this type of functionality into the protocol if there + +[46:00] is something that ends up working for the majority of people. I feel like we are trying to bake something very use-case-specific in at the low level here. I think the motivation for this is a bit problematic, because we're saying that manually calling the rent bump operation might be more complex, but at the end of the day, neither of these is actually going to be performed by the users themselves, right? This is going to be managed by the wallet; the user doesn't necessarily understand the difference or care about the difference, and from that perspective it doesn't sound like we gain enough to justify the increased complexity here. + +[47:00] Yeah, that sounds reasonable to me; I wasn't super strong on this point. So, just to finalize this, and then I'll let Sid talk for a bit: are we good with moving forward on rent with automatic rent bumps,
and then manual rent bumps via this operation; are we good with that? It sounds like some people were discussing a host function where you provide keys to bump. Did we decide on doing that, where in contracts you would provide a list of keys to get bumped, but not the amount? Well, that's what I was discussing; that's what the aux rent bump is: it defines the set of keys to bump, but not the amount to bump, and then the amount would be specified in the footprint. It's a complex and kind of niche solution, so maybe it's not really required. Okay, + +[48:00] you can move on to the event proofs. Yep, let's do it. Okay. So right now the Soroban events exist in the transaction meta, in transaction meta V3, and the meta is not included in the ledger; it's not hashed anywhere. Initially we said we would hash the components of the meta, most importantly the events, and include this hash somewhere in the ledger. At first it was going to be in the transaction result, in a new transaction result, but this would have caused a lot of downstream issues, and we decided instead to move it into the ledger header. But more recently, after discussing with Nico, we decided to just not hash it anywhere and just include it in the meta, which should be fine for now; and if we need some way to + +[49:00] add proofs for events, we can add it later, instead of putting in a solution now that may not be sufficient. Now, we haven't discussed this much, so this is an opportunity for anyone to raise any concerns with this approach; if there are no concerns, then we can go ahead. I mean, I'm very slightly concerned that something we were recording we're now going to not be recording. In some ways it gives us more wiggle room, in that we can do replay without reproducing the exact same events as last time, but it also means that if we do replay and we completely break events, for example, we won't notice that fact. So there's a sort of + +[50:00] error-checking aspect to recording things that is being given up here, right? Yeah, that is a good point. Events don't go into the ledger, and so it's some sort of a question of whether the events that are emitted by a contract are considered a canonical part of the contract's interaction with the world, or an incidental detail that we can feel free to break or whatever. Yeah, I don't think we would want to break it, obviously. But if this is a validation question on our end, before we add anything related to this, we could just add some validation where we make sure the meta doesn't change, which we might do already, right? We do that already, yeah. So if there's any unit test that does event emitting in stellar-core, that'll + +[51:00] get caught there, because we just record the meta directly. So from that perspective, I think we should be fine. And the bigger reason not to do this was that, even though in the old proposal we had a way to cryptographically verify that an event was in a ledger, the process was very complex, and realistically I don't think anyone would be doing it anytime soon, right? So it made sense to drop it. Yeah. So there are two completely different use cases for this. One of them is proofs, basically:
someone wants to argue, hey, I saw this event and you claim it didn't happen, and I'm going to go prove that it did happen. The other is sort of consensus and + +[52:00] integrity: does the record force us into replaying something precisely or not? And I can see that being both a blessing and a curse. We've actually spent a lot of time trying to subdivide things that we want recorded precisely versus things that we don't: we want diagnostic events, print statements, and all this other kind of stuff that the user can just sprinkle in willy-nilly, that the operators can bring along, and that we as core developers can put in, without any of it becoming canonical. So we've had this whole question of subdividing things into canonical or non-canonical in the event stream. Now here you're just saying, basically: we'll make the entire event stream non-canonical, right? It's nice if we keep it the same from one release to the next, but it's not actually part of the ledger. And part of me is okay with that, if everyone who has thought about using events for + +[53:00] sort of canonical reference purposes feels okay with it, because it seems like a big change to me, but it's one that I can live with; it certainly makes life easier in many ways for us. Do you guys hear me? Yeah. The thing that Graydon brought up is something I was not really thinking about; that's actually a problem. The verification use case is the thing I was thinking about when I said, hey, let's remove this, we don't really care. For being able to replay historical data with high fidelity, this is the only signal we can get, and if we don't include it anywhere, we're not going to be able to guarantee + +[54:00] that. And until we have test coverage in Soroban, in the actual code base, for how we produce those events, I don't actually feel very good about not having this anywhere. So maybe we should go back to putting this (maybe it's just the hash or something) in the transaction result. I mean, now that we're actually back to basically only one operation for Soroban transactions, maybe this is not so bad. Are you saying using the transaction result or the ledger header? No, not the ledger header; it would be a normal result for the Soroban invoke host function op, + +[55:00] whatever the name of that is, yeah, and just have it there; we get it for free, basically. Oh, in the result, yeah, including the hash. Because that's actually the thing I'm really worried about here: I'm not sure we can guarantee that the test coverage will be that good. Okay. Thank you. Sid, go ahead. Yes, for historical replay, I think that's the thing I'm really worried about: that we break events for older ledgers and we won't know it. Okay. And this is related to another issue we need to deal with, which is that the invoke host function op right now returns the SCVal, which will + +[56:00] actually be a vector of SCVals once we move to multi-invoke, so that should be moved to the meta as well, and all of that can be hashed: the events and the return value can be hashed and included in the invoke host function op result. That's what you're saying, right?
So I thought what we said was that this result should actually become a system event, right? Yeah, I'm not sure; we haven't discussed it, and I guess the format is still up for debate. But the actual result that you create for the operation should be a hash that points to the meta that was emitted, basically, right? And then the meta would include a bunch of hashes: the result (or the events, sorry), and potentially we can include the ledger changes there as well. Okay. + +[57:00] Okay, yeah, so that was kind of the original plan, I guess, but maybe slightly different now, because since then we moved the SCVal results to the events, to be an event. Yeah, well, just to be clear, that hasn't happened yet, but we should; all those changes make sense together. Yeah, okay. I think I can update; we have some issues and PRs open for this, so I'll go and update them, and this makes sense, because otherwise we were completely blind on this one. So we are going to wind up having basically just a fidelity check, like a hash for identity, so that we have a way to guarantee that we are not breaking historical + +[58:00] replays. I mean, it also forces us to maintain fidelity, right? Exactly. Which means that if we accidentally emit an event differently, we're going to have backwards-compatibility code that we have to jam in there; that might get annoying. I just wanted to bring it up; I'm happy either way, I guess. Anyway, I just want to make sure, sorry, that the end state we're talking about is: it sounds like we are going to include the hash of the results and events in the invoke host function op result, because the events and the results do the same thing now. Yeah. + +[59:00] All right, and that winds up in the header, but we're not going to have that sort of monstrous reproduction of TransactionResultV2? No. Okay, so it's a sort of mini addition to the ledger header. But we don't have to change the ledger header at all, right, if we just change the invoke host function op result. Oh, so the result XDR has got the hash; that's what it sounds like. Yeah, because the other way would be making a TransactionResultV2, which would be annoying. That would be it, yeah; or a transaction meta V2. You've gone through several iterations of this, right? There's a version where you fork the transaction result, and a version where you fork the transaction meta. We need a new meta, but I don't think we need a new result, a new transaction result. Do we actually need a new meta, if we're just taking this in the op result? You still need to put the events + +[01:00:00] and the results, like the events, in the meta, right? We have a TransactionMetaV3 struct. Oh, you need that in order for the events to actually go somewhere, for downstream systems to actually extract them. Okay, but you're not going to hash that meta? No, but you are going to hash those events into the transaction result. Yeah. Okay.
So the events wind up in two different places: their full content winds up in the meta V3, so that Horizon can read it, and their hashes wind up incorporated into the transaction result, which is just a normal transaction result. Yeah. Okay, I can live with that, for sure. Right, okay, we are over time. Any final words from anyone? Okay, let's keep the discussion going + +[01:01:00] in Discord. Thank you all for joining; have a great rest of your day. + +
diff --git a/meetings/2023-05-04.mdx b/meetings/2023-05-04.mdx new file mode 100644 index 0000000000..13647ed3d1 --- /dev/null +++ b/meetings/2023-05-04.mdx @@ -0,0 +1,205 @@ +--- +title: "Expiration Ledgers vs Rent Balances" +description: "Design discussion comparing rent balances and expiration ledgers for Soroban state expiration, focusing on user experience, downstream system complexity, eviction pressure, and safeguards against rent-fee exploits." +authors: + - dmytro-kozhevin + - garand-tyson + - graydon-hoare + - leigh-mcculloch + - nicolas-barry + - paul-bellamy + - siddharth-suresh + - tomer-weller + - tsachi-herman +tags: [soroban] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session examined a major shift in Soroban’s state-expiration design: replacing variable rent balances with fixed expiration ledgers. The discussion compared the two approaches from first principles, weighing predictability and simplicity against dynamic eviction pressure and resistance to economic exploits. + +Participants explored how expiration ledgers could dramatically improve developer experience and downstream tooling (Horizon and RPC), while also identifying new risks—such as locking in cheap storage early or enabling storage “middlemen”—and discussed mitigations like maximum lifetimes and bounded auto-bump behavior. + +### Key Topics + +- Rent balances (legacy approach) + - Entries hold an XLM rent balance that is decremented each ledger by a variable rent fee + - Rent fee adjusts with bucket list size to create automatic eviction pressure + - Pros: + - Strong resistance to economic exploits + - Natural eviction as network usage increases + - Cons: + - Poor UX (lifetimes are estimates, not guarantees) + - Difficult or impractical for downstream systems to track without re-implementing bucket list logic +- Expiration ledgers (proposed direction) + - Each entry stores a single expiration ledger + - Entry is valid before that ledger and expired after + - Expiration info is emitted in transaction meta on creation or bump + - Pros: + - Predictable, ledger-based lifetimes + - Much simpler for Horizon and RPC to index and reason about + - Cons: + - Rent price is “locked in” at creation or bump time + - Loses dynamic eviction pressure inherent in rent balances +- Network health and exploit concerns + - “Storage middleman” contracts could pre-purchase long-lived storage at low fees and resell later + - Early spam could lock in cheap storage and crowd out later legitimate usage + - Proposed mitigation: + - Enforce a protocol-level maximum lifetime (e.g., 6–12 months) + - Make this maximum adjustable via network vote +- Entry resizing under expiration ledgers + - Rent is charged per byte + - If an entry grows in size, additional rent must be paid + - Expiration ledger should never decrease on resize + - Updates would require paying the delta cost to preserve the same expiration +- Auto-bumps vs manual bumps + - Auto-bumps: + - Extend expiration slightly on every read or write + - Intended to fairly distribute cost for shared resources (e.g., contract WASM, instances) + - Implemented efficiently via small “shim” entries rather than rewriting large state + - Manual bumps: + - Explicit operation to extend expiration to a target ledger + - Open design question: + - additive model (pay only for extension) + - or reset model (burn prior payment and re-buy full lifetime at market rate) +- Temporary entries and security semantics + - Expiration ledgers make exact TTLs possible in theory + - 
However, because any user can bump entries, expiration is not absolute + - Security-sensitive TTLs must still be enforced in contract logic + - Discussion around: + - whether auto-bumps should be configurable + - whether “non-bumpable” temporary entries are worth the added complexity +- Auto-bump configurability + - Strong consensus that auto-bumps are essential for shared state (contract code, instances) + - More debate for contract data and user-specific state (balances, LP positions) + - Trade-off: + - configurability vs simplicity + - risk of over-bumping short-lived data (e.g., oracle values) + - Leaning toward: + - fewer flags in v1 + - possible future extensions if clear patterns emerge (see the sketch after this list) + +
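The following is a minimal sketch of the expiration-ledger model summarized above: a liveness check plus a per-access auto-bump that is clamped to a protocol maximum lifetime and charged at the current fee. All names and constants are illustrative, not the finalized protocol.

```rust
// Minimal sketch of the expiration-ledger model: an entry stores only its
// expiration ledger; accesses extend it slightly, clamped to a protocol
// maximum lifetime, paying whatever the market rent fee is *now*.

const MAX_LIFETIME_LEDGERS: u32 = 3_000_000; // e.g. ~6 months; network-voted
const AUTO_BUMP_LEDGERS: u32 = 10;

struct Entry {
    expiration_ledger: u32,
}

fn is_live(e: &Entry, current: u32) -> bool {
    current <= e.expiration_ledger
}

/// Extends the expiration on access, never past `current + MAX_LIFETIME_LEDGERS`,
/// and returns the rent charged at the current (not locked-in) fee.
fn auto_bump(e: &mut Entry, current: u32, fee_per_ledger_byte: u64, size: u64) -> u64 {
    let target = (e.expiration_ledger + AUTO_BUMP_LEDGERS)
        .min(current + MAX_LIFETIME_LEDGERS);
    let extension = target.saturating_sub(e.expiration_ledger);
    e.expiration_ledger = target;
    extension as u64 * fee_per_ledger_byte * size
}

fn main() {
    let mut e = Entry { expiration_ledger: 100 };
    assert!(is_live(&e, 90));
    let charged = auto_bump(&mut e, 90, 2, 64); // 10 ledgers * fee 2 * 64 bytes
    assert_eq!(e.expiration_ledger, 110);
    assert_eq!(charged, 1_280);
}
```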
+ Video Transcript + +[00:00] So without further ado, we're going to keep talking about state expiration today, and I think Garand dropped a bit of a bomb a couple of days ago, so I think we're going to start with that. Garand, if you can give us a quick overview of some of the suggested changes. Yeah, sure. So up until this point, the current legacy expiration proposal has had this thing called a rent balance. Essentially what you would do is, whenever you create an entry or want to do a rent bump, you put XLM into this rent balance, and then every ledger a variable fee called the rent fee is removed from this rent balance, and the rent fee can either decrease or increase based on the size of the bucket list and network usage. The reasoning behind this initially was that we want to provide some sort of market equilibrium: as the bucket list gets larger, and as the price of writing to the bucket list increases, we + +[01:00] also have increased pressure to evict and expire entries out of the bucket list. And so the thinking with this variable rent fee is that as the bucket list grows, the rent fee also grows, so you deduct rent balance more aggressively, and so more entries fall to zero or negative rent balances and then get expired more frequently. So that's the original thinking as to why we had this variable rent balance and this variable rent fee. Now, there are two primary issues with that. The first is just the user experience story. With rent balance, because the rent fee that's deducted is variable from ledger to ledger, you can estimate how many ledgers you think your entry will be live on the network, but you can't know for certain. For instance, say you initially paid for 100 ledgers of rent, but there were a bunch of merges, so the bucket list size decreased a lot; then your entry could live much longer. Conversely, if you paid for, say, 10 years of rent, your entry might only last one year, if the network explodes in popularity and the bucket list size + +[02:00] increases. And so it's kind of a weird user interface: users expect an entry's lifetime to be measured in ledgers, but really you have this rent balance and this variable thing you can't really control. So that was issue number one: it was difficult for users to understand, and it didn't have a great user story. The second issue is that of downstream systems. Because the rent fee is variable from ledger to ledger, you can't predict what the rent fee will be, and so you cannot predict an entry's rent balance. Now, in Stellar Core we have this bucket list data structure, which is specially designed with multiple levels and this log-structured merge tree approach, and we've designed the rent balance system to work very well with the bucket list, such that with BucketListDB you always know exactly how much rent balance your entry has, and we can do this efficiently, without iterating through every entry and decrementing its rent balance by the current ledger's rent fee; we + +[03:00] can get around that using some optimizations of the bucket list structure. Now, the issue for downstream systems is that downstream systems don't have a copy of the bucket list; they're running captive core.
But the way that captive core is currently designed, it's too expensive to query it directly, and so that means that in order for these downstream systems (primarily Horizon and Soroban RPC nodes) to have accurate rent balance information, what they would need to do is essentially either maintain their own version of the bucket list and apply rent via that copy, or do some very inefficient operations with SQL, where you essentially cache some number of rent fees and then lazily apply them in the background, or something like that. In either case, it's a lot of work for downstream systems, a lot of disk I/O, and things like that. So those are the two main drawbacks, and when thinking about this issue we asked, from first principles: okay, do we + +[04:00] need this variable rent fee, do we need a rent balance, or can we use a definitive expiration ledger? And so that's what today's conversation is about: rent balance versus expiration ledger. Now, how the expiration ledger works is that, instead of having the ledger entry store a rent balance field that is periodically deducted from, all it stores is a single field, which is the expiration ledger. It's very simple: before the expiration ledger, the entry is live and accessible; after the expiration ledger, the entry has run out of rent and is not accessible. And then, once the entry is past this expiration ledger, it can be expired, which is when it's deleted from the bucket list; and then, if it's a temporary entry, it can be permanently deleted, or, if it's a restorable entry, it's deleted and then sent to a state restoration node or something like that. Now, the advantages of the expiration ledger approach are that it's significantly easier for downstream systems, because they don't have to periodically update entries with rent balance information: the meta that's + +[05:00] emitted whenever an entry is created or receives a rent bump will tell downstream systems exactly when the entry should expire, so it's pretty straightforward from the implementation standpoint. It also makes a lot more sense for users, who expect a lifetime to be measured in ledgers: discrete and predictable values. Now, the drawback to this is that we can't have the dynamic eviction pressure feature that we have with rent balance. How the expiration ledger implementation will work with respect to fees is that we would take whatever the current market rent fee rate is, based on the size of the bucket list, and then lock that rate in and charge it for the entire lifetime of the entry. For instance, say you are creating a new entry that has one year's worth of rent: you would be charged one year's worth of rent at the current rent fee. Now, the issue with this is: suppose that within that year the bucket list size increases significantly. Essentially, what + +[06:00] you would be doing is, because you locked in that rate when the size was small, you'd be paying an artificially low rate compared to other entries that are being added to the network later. So you have this weird system where these entries can be grandfathered in, so to speak, such that they pay low fees because they got in at the ground level when network usage was low. With rent balances you don't have this grandfathering, because even if you create an entry on day zero of Soroban, on launch day,
when the bucket list was at its theoretical smallest size, every entry is subject to the same variable rent fee no matter when it was created, so old entries and new entries are charged the same rate. However, because you have to essentially lock in the rate at creation time or rent bump time with an expiration ledger approach, that's not possible. And so I think we want to think about what it takes to make expiration ledgers work, because I think the two benefits, to downstream systems and to the user experience, are very significant, and so we should + +[07:00] consider expiration ledgers pretty seriously. I think the things we want to prevent most are two scenarios. First, a system where users, or smart contracts, can essentially provide a storage interface cheaper than the protocol can. Imagine, say, on day zero, when the bucket list is small, someone spins up a smart contract that has public functions that exactly mirror the storage functions that the protocol exposes; so you have get, create, just the exact same interface, but instead of calling it directly through the Soroban SDK, you would just make a call to a smart contract for your storage needs. What this storage contract could do is just buy up, say, the maximum number of ledger entries possible on day zero, with say 100 years of rent or some very large value, and essentially permanently lock in those entries for 100 years at the lowest possible rent fee. And then, say two or + +[08:00] three years down the line, if the Stellar network explodes in usage, the rent fee will be significantly higher, and so what this contract can do is essentially auction off this rent space at an artificially low rate. So you can use a middleman contract to get cheaper storage than you could if you went to the protocol directly. And this is really bad from a network health perspective, because essentially every storage call now has to pass through this middleman, and so you have lots of additional overhead for serving storage, just because you essentially have a bug, an exploitability, in the way that you charge rent. So I think to prevent that specific exploit, where you have these storage contract interfaces, what you would need is some sort of upper bound on the lifetime of an entry at any given point, the thinking being that if you allow arbitrarily large rent purchases, then you could, say, + +[09:00] lock in an entry for 100 years, which is far too long, because the price will probably significantly increase within that lifetime; but if you have a maximum lifetime of, say, six months, it's unlikely that a storage contract interface could be profitable with only six months of difference between the initial rent fee paid and the rent they'd be providing to their users. And so I think the plan, if we do expiration ledgers, is to have a network parameter, which is the maximum lifetime of a given entry on the ledger. This could be something like six months or one year, and it would be a network parameter. You want this value to be as large as possible from a user perspective, just so you can provide the most flexibility and usability,
but you want it to be small enough that you don't have these rent-related exploits. And so that would be a number that we can change by a network vote, just so we can tune this value up or down as we see exploits happening on the network, or as we see that no exploits are happening. + +[10:00] In addition to this issue, the middleman storage contract, we also want to make sure that we're not allowing a bunch of spam entries to lock in very low rates, take up space on the network, and essentially raise the price for everyone else that's doing legitimate, non-spam work. You can imagine that on day zero, when the bucket list is small, you have a bunch of airdrops that take whatever the maximum lifetime is (say six months, a year, whatever) and essentially just mint spam airdrop tokens until the bucket list size increases such that it's no longer profitable. The issue with this is that, even though each additional spam token they put on the network raises the rent fee slightly for the next spam token, they can still do this very fast, and very quickly take up the cheapest rent options with all of this spam. And unlike a rent balance approach, even though the spam has caused the rent fees to be high for everyone else, because the spam did it first, they + +[11:00] are not subject to those higher fees, and you can't evict them. Which means you can have these events where, if for whatever reason the bucket list size decreases and the rent fee drops rapidly, all these spam tokens come in, fill the gap very quickly, and get the network back up to that high rent fee rate, at the cost of all of these spam entries taking up more space than they probably should. So I think that's probably where I want to get the conversation started and open up the floor for questions. Those are the biggest pros and cons of each: the pros of rent balance being this eviction pressure and defeating these exploits natively, without the need for something like a maximum lifetime; the cons being downstream systems and usability. Garand, there was a question around the downstream systems. Can you quickly outline the difference; specifically, with expiration ledger, what would the downstream systems' + +[12:00] expectations be? Yeah. So the issue with rent balance versus expiration ledger is just that an expiration ledger we can put in meta, so that the downstream systems can be directly told what the expiration ledger is, but there's no way to emit meta for the variable rent balance deductions, just because they're too frequent. For instance, with expiration ledger, because it's set a single time, on creation or whenever an entry has its lifetime extended, what you can do is just emit meta that says: okay, this key has this lifetime. Downstream systems store that in a SQL database, and whenever you access that entry, they can just spit out the lifetime. Easy. You can contain all that information in meta. With rent balance, the issue is: the initial rent balance is in the meta, sure; say this has a starting value of a thousand XLM. But every ledger a variable amount is deducted from that thousand XLM, and because every single ledger entry is subject to this variable rent, + +[13:00] every single ledger close, what we'd have to do,
to contain this information in meta, is literally emit a meta entry for every single entry on the ledger, periodically, with the updated rent information, and that's just not possible. So essentially the downstream systems would have to manage this rent balance themselves: we could probably emit the variable rent fee as meta every ledger close, but then it would be the responsibility of the downstream systems to implement the rent balance bookkeeping themselves, if that makes sense. It does. This does sound for the most part like a win-win, so I definitely want to focus on: what are the pros of the current rent balance system compared to expiration ledger? + +[14:00] Yes. I think it comes down to eviction pressure, and to not being exploitable, being more game-proof. That's the primary issue with locking in a rent fee at creation time: you open up these vulnerabilities for the contract storage middleman and for long-lived spam entries that increase rent fees for everyone else. Those are the two exploits that we want to prevent, and so if we can find suitable limitations on the expiration ledger (one limitation being not allowing arbitrarily large lifetimes) to defeat those two exploits, then I think it would be a good decision. There are also a couple of other drawbacks; I think these are all solvable problems, but just something to think about. Because you're locking in an expiration ledger, there are questions about what to do whenever you resize an entry. For + +[15:00] instance, say you create an entry that's only one byte large and say: okay, I want it to have an expiration ledger 10 years from now. So you pay the rent fee for 10 years, but for only one byte. And then two ledgers later you say: oh, this entry is now 100 kilobytes. Then you need to reconcile what to do in this situation: do we shorten the lifetime, because the rent fee is now higher, because it's a larger entry size? Just for reference, the rent fee is charged per byte, so you need to pay more rent in order to keep the same expiration ledger you had before. My current thinking for that particular edge case is that the expiration ledger should never decrease: a write, an update, or a change to the size of an entry should never decrease its lifetime; it should only match or increase it. Which means that if you have an entry with an expiration ledger 10 + +[16:00] years in the future and you resize that entry, then what you need to do is pay for essentially the difference. So in this example, if you had a one-byte entry and now it's a 10-byte entry, you'd have to pay for the nine additional bytes whenever you do that update. So there are issues like that, and then the two exploitability cases that rent balance solves really elegantly. Because rent balance is charged every ledger, if you resize an entry, the new additional fees will just be picked up on the next ledger, so it's handled automatically, and these exploits are not possible, just because the rent you're paying is always up to date. You can never game the system by locking in a rent fee early and then using it later, when the rent fee is more expensive.
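A small sketch of the resize rule just described, assuming per-byte rent and the never-decrease invariant; the arithmetic mirrors the one-byte-to-ten-bytes example (names and fee units are illustrative).

```rust
// Growing an entry never moves its expiration ledger; instead the writer
// pays, at the current fee, for the *additional* bytes over the ledgers
// remaining.

struct Entry {
    size_bytes: u64,
    expiration_ledger: u32,
}

/// Returns the extra rent owed to keep the same expiration after a resize.
fn resize_charge(e: &mut Entry, new_size: u64, current: u32, fee_per_ledger_byte: u64) -> u64 {
    let remaining = e.expiration_ledger.saturating_sub(current) as u64;
    let added = new_size.saturating_sub(e.size_bytes); // shrinking costs nothing
    e.size_bytes = new_size;
    added * remaining * fee_per_ledger_byte
}

fn main() {
    // The 1-byte entry from the example grows to 10 bytes: pay for the
    // 9 additional bytes over the whole remaining lifetime.
    let mut e = Entry { size_bytes: 1, expiration_ledger: 1_000 };
    let owed = resize_charge(&mut e, 10, 0, 3);
    assert_eq!(owed, 9 * 1_000 * 3);
    assert_eq!(e.expiration_ledger, 1_000); // lifetime unchanged
}
```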
Is there anyone on stage or in the audience who wants to speak in favor of rent balances? Okay. + +[17:00] So, yeah, I just want to comment on this from a product perspective. I definitely think that this is a big win, because the user experience of the previous proposal, rent balances, requires a bit of a complex mental model of what rent actually means. When people pay rent, they are used to locking in a specific rate for a specific given amount of time. So I do think that this new proposal sits better with the mental model of what rent actually is, even though we're not using the word rent here. + +[18:00] One question that I also asked on Discord is around temporary entries, and the question of auto-bumps, and what the implications are for these. Yeah. So my current thinking is that we should still have auto-bumps, but the auto-bump should be optional, and let me explain what I mean by optional. I think in this system we still want an auto-bump mechanism such that frequently used entries and shared entries, such as contract instances and contract code WASM, are paid for. What I was envisioning is that, where before we were bumping by some amount of XLM, now, because we have this expiration ledger interface, we would just bump by some modest amount, like 10 ledgers per access, automatically. And part of what that would look like is that you would pay at the current market rate, whatever the current rent fee is, for + +[19:00] the extension. So even if the entry had, say, three years' worth of rent that was paid for two years ago, so it was very cheap, whenever you access it you would still need to pay for the additional 10 ledgers at the current market rate. That's automatic bumps. What I think we should do for auto-bumps, especially as for temporary entries there are use cases where auto-bumps are wanted and cases where they aren't, is that when creating an entry, on the initial creation, you can set a flag, and that flag is auto-bump true or false. What this allows is that the original developer, when creating the entry, can choose whether this entry is something that should be bumped, or something that is short-lived and should expire. Whenever you access that entry, that flag is stored in the ledger entry, and so the on-access auto-bump is determined by that creation-time flag. So auto-bumps are optional, but they're not optional for the entity that's accessing the entry; they're + +[20:00] optional based on the entity that's creating the entry, if that makes sense. And I think that makes the most sense. Now, another thing for temporary entries: because we now have this expiration ledger (I know before we went back and forth as to whether we should have temporary entries with firm or exact cutoffs, entries that expire on an exact ledger), I think under this system, now that we don't use rent balances, that should be very possible and easy to do, such that you can use temporary entries for security features. If you want an entry that lasts exactly 100 ledgers, what you would do is make a temporary entry, set the TTL to 100, and then set auto-bump to false.
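A tiny sketch of what a creation-time auto-bump flag could look like, matching the "temporary entry, TTL 100, auto-bump false" example; the types are hypothetical, not the actual SDK surface.

```rust
// Hypothetical creation interface with the auto-bump flag fixed at create
// time: a short-lived temporary entry that should expire on schedule opts
// out of per-access bumps.

#[allow(dead_code)]
#[derive(Clone, Copy)]
enum EntryType { Temporary, Restorable }

#[allow(dead_code)]
struct LedgerEntry {
    entry_type: EntryType,
    expiration_ledger: u32,
    auto_bump: bool, // set once at creation; consulted on every later access
}

fn create(entry_type: EntryType, ttl: u32, auto_bump: bool, current: u32) -> LedgerEntry {
    LedgerEntry { entry_type, expiration_ledger: current + ttl, auto_bump }
}

fn main() {
    // "Make a temporary entry, set the TTL to 100, set auto-bump to false."
    let oracle_price = create(EntryType::Temporary, 100, false, 5_000);
    assert!(!oracle_price.auto_bump);
    assert_eq!(oracle_price.expiration_ledger, 5_100);
}
```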
Now, actually, I still need to think about that a little bit more, whether we can, because right now we are allowing both temporary and + +[21:00] restorable entries to be bumped by anyone, anytime. So I take that back: they still might not be appropriate for security uses out of the box. What I'm envisioning right now is the auto bump flag, and then, in addition to the automatic bumps on access, which both temporary and restorable entries have, you can also still manually bump any entry, both temporary and restorable, via an operation. That operation is similar to what was in rent balance, but now you just specify the new expiration ledger. My thinking is that whenever you bump, whenever you pay for more, there are two options here. Sorry, were you saying something? No? Sorry. So whenever you do a manual bump operation to extend the expiration ledger, there are two ways we could think about it. First, you could be credited for the amount you've already paid, and view it as an + +[22:00] extension. For instance, say I have a ledger entry that's set to expire in a year and I want it to expire in 18 months. One potential solution would be that the rent bump operation only charges you, at the current market rate, for six more months of rent. It says, hey, there's already 12 months of rent here, so we're only going to charge you for the additional six, and the total comes out to 18 months. That's option one. The drawback to that option is, again, that for that first 12 months you're locking in a lower rate; only the last six months are at the market rate. Option two is that whenever you do a manual bump, the previous balance is burned. If there's an entry that you want to live 18 months, but it currently has an expiration that's only 12 months in the future, you have to pay 18 months of rent at the current market + +[23:00] price, and then the expiration ledger is reset. In this way the 12 months that were already there are burned, and you're charged for the entire 18 months at the market rate. The advantage of this, again from a network health perspective and to prevent exploitability, is that we want to be charging as close to market rates as we can, and in this system you can always be charging the market rate. However, it's kind of a poor UX, because you're burning an amount that it seems should probably be strictly additive. So, as far as the two interfaces are concerned, what are your thoughts on manual rent bumps? That was a lot. I just want to quickly go back to a point you made earlier about temporary entries. If I understand correctly, you are suggesting that for temporary entries, if we do go for the expiration ledger, the + +[24:00] contract should be the same; that is, we're letting go of the whole short/medium/long terms and making it the exact same interface as restorable ledger entries? Yeah, I think so. Now, the one thing, though, is that I initially said they were fit for security uses, but then I caught myself, because right now any user can bump any entry.
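A worked sketch of the two manual-bump charging options just described, using the 12-months-remaining, extend-to-18-months example. The flat monthly rate and function names are assumptions made for illustration.

```rust
/// Option 1: credit the rent already in place; charge only the extension
/// at today's rate. The old 12 months keep their locked-in, cheaper rate.
fn option_one_credit(remaining: u64, target: u64, rate_now: u64) -> u64 {
    target.saturating_sub(remaining) * rate_now
}

/// Option 2: burn the remaining balance; charge the full new lifetime at
/// today's rate and reset the expiration ledger.
fn option_two_burn(target: u64, rate_now: u64) -> u64 {
    target * rate_now
}

fn main() {
    let (remaining_months, target_months, rate) = (12, 18, 100);
    println!("option 1: {}", option_one_credit(remaining_months, target_months, rate)); // 600
    println!("option 2: {}", option_two_burn(target_months, rate)); // 1800
}
```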
That means that even if you have, say, a KYC entry that's only supposed to last 128 ledgers, and you initially set its expiration ledger to be 128 ledgers in the future, a malicious user could bump it using the manual operation. So I think for security purposes that still needs to be enforced at the contract level; in this case the contract would need to embed its own TTL inside the temporary entry, just because you have this arbitrary bump. I guess we could have a no-bump flag for + +[25:00] temporary entries, but that might be adding too many flags at this point, because in the current interface anyone can bump anything. Okay, thanks for answering that question. Going back to auto bumps for a second: I've been thinking about the wallet and dApp experience and what they need to think about in terms of state expiration, and it does seem like auto bump doesn't negate the need for wallets and dApps to actually be attentive to ledger entry expiration times and to act on that, either by suggesting the user do a manual bump or by initiating a manual bump. So it does seem like auto bump has this implicit behavior to it, in which we're extending ledger entry expiration times, but it + +[26:00] adds complexity to the system without actually removing complexity from the implementation of products; it makes things a bit less expected and predictable. Well, I think the auto bumps serve a very specific purpose, or at least they were initially designed to serve a specific purpose, and that is shared state. Your example, a dApp or a wallet, is not a good example for auto bumps, because you have a particular balance entry that you care about and no one else really cares about. From a wallet perspective, say you want your balance entry to live for one year: the correct answer is not to keep it alive via smart-contract auto bumps, but rather just to do a manual operation and bump it by a year. The use case for auto bumps is more for contract instances and contract wasm. Wasm in particular is a + +[27:00] difficult one, because you can have a single wasm blob that many different contract instances use, and the question is who pays for that wasm blob. By having this auto bump feature, every user who touches it is required to pay a little bit, and you can share the cost of this contract code and this contract instance among all the users who are using it. So I think the auto bump feature is specifically for these kinds of shared resources that have no clear owner. Take USDC, for instance: it'd be a kind of crappy interface if 10,000 people used it a day but there was no auto bump, and then the 10,001st person had to go pay rent because it got archived or something; they were just the unlucky user that drew the short straw. So I think there definitely still is a pretty strong use case for auto bumps for these sorts of entry types.
But I agree, for something like a + +[28:00] dApp or a wallet you would still need this manual operation in addition to auto bumps. Oh, and would you recommend a ledger entry that is user-specific, like a balance or an LP position, to be auto-bumpable or not auto-bumpable? I think it just depends on the contract implementation. I don't think it needs to be auto-bumpable the way that contract instances and such do, but I don't see a downside either. The general design model I had in my head is that extending a lifetime should never be negative (this is why it's not good for security cases, because anyone can extend any lifetime), such that it should always be a positive action; it cannot be a negative action. There were talks last + +[29:00] time of exposing an auto bump flag, and I think this probably makes the most sense: for contract instances and contract wasm code you don't have the option; whenever you deploy, it must be auto-bumped. But for any other key you create, you're probably just given the option. Because you could also say that contract instances and contract code are auto-bumped and nothing else is, but then you get into this sticky situation where there are still contract types that have shared state. For instance, think of a DEX where there's an asset pair that's stored as an entry, and many different people are viewing or trading that asset pair; you wouldn't want one individual to be stuck with the bill. So I still think auto bump is a powerful primitive, and it should probably be enabled by default, just because it's the safest route, but having an option to turn it off, especially for personal state like balances, is probably + +[30:00] a good idea. So, now that I've talked for a little bit, to answer your question: for something like a token balance I would probably say no to the auto bump behavior, just because it's something whose lifetime will probably be explicitly managed by a wallet. I agree, it's kind of needed. The one thing that I'm thinking about there is that for the first version we can do something like that, but I would imagine that in the future we may want something that auto-tunes over time. Because in the example where you have a very active contract, the wasm is basically going to be used multiple times per ledger, and you end up reaching your + +[31:00] limit fairly quickly. And when the limit is reached, you're backed into that situation where some users are going to bump and some are not: basically, the first one in the ledger does the bumping. And also, if the blob is fairly large, that auto bump ends up adding quite a bit of cost to each individual transaction if it's bumping for a good number of ledgers, let's say 100 ledgers or something, which is something to keep in mind. Garand, there is a question about the mechanics of auto bump from Paul: can you expand a bit on when auto bump actually occurs? Yeah.
So the current strategy is that auto bump occurs on all access, which is + +[32:00] both read and write access. How this works under the hood is that, in addition to having the expiration ledger stored in the ledger entry, we also have a kind of shim entry type that is used for read-only access. For instance, your wasm blob is only ever accessed read-only, and to modify a ledger entry in the bucket list you have to rewrite the entire entry at the top-level bucket. So if we were to modify the expiration ledger directly in the ledger entry, you'd have to rewrite the entire wasm blob. To avoid this, we have this shim entry type, which is just an expiration ledger extension. This entry is very small, literally just a key and then the new expiration ledger, and we use this entry type whenever we want to bump a read-only entry. So in the wasm use case, even + +[33:00] though you're only reading the wasm, because we are auto bumping you do have to do a small write. But that write is very small; it's the minimal-size write you can do. We are implicitly turning every read into a read-write, but we're not rewriting the entire entry: we're reading the entire entry and then writing a very small entry with the new expiration ledger. Under the hood, that's how we efficiently implement rent bumps for both reads and writes. I do want to touch on something that you mentioned before, which is the question of whether bumping in general should be a flag. Last week we talked a bit about various oracle usage + +[34:00] patterns, and we got to the conclusion that sometimes the contract developer would want to limit, or give an upper boundary to, when a ledger entry should exist, and making it non-bumpable is a very easy way to do that. The question is, does that over-complicate the system? Well, I guess we have two different questions here: does the entry auto bump, and does the entry allow bumps at all. From an implementation standpoint these would be very easy to implement; we could just throw a flags field on the ledger entry and define a couple of flags. The question is whether this makes the user experience too complex. What this would look like, I + +[35:00] think, is that this would only be applicable to temporary entries. Okay, let's talk about the auto bump flag first. The auto bump flag, controlling whether this item is auto-bumped on access, could be an optional flag for both temporary and restorable entries, but I think it should be strictly enforced for contract wasm and contract instances. Now, for whether you allow bumps at all: we could add that flag, but it could only be used for temporary entries. This would cover the security use case where you want an entry to last exactly some number of ledgers, or the oracle use case where the thing is only valid for five minutes or whatever. If you set this flag, the entry would not receive auto bumps, and it would also not be bumpable by the manual operation; if you tried to bump it, it would just fail or panic or something like that. Now, you would only want this flag on temporary entries, because restorable entries and unique entries should always be bumpable.
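A sketch of the small extension ("shim") entry described at [32:00]. The type and field names here are illustrative assumptions; the actual ledger entry XDR differs.

```rust
/// Key identifying a large, read-mostly entry (e.g., a wasm blob's hash).
struct EntryKey([u8; 32]);

/// The full entry: rewriting this in the bucket list means rewriting the
/// whole (potentially multi-kilobyte) blob just to touch its expiration.
#[allow(dead_code)]
struct LedgerEntry {
    key: EntryKey,
    expiration_ledger: u32,
    data: Vec<u8>,
}

/// The shim: bumping a read-only entry writes only this tiny record,
/// a key plus the new expiration ledger, instead of the full entry.
struct ExpirationExtension {
    key: EntryKey,
    new_expiration_ledger: u32,
}

fn main() {
    // Reading a 64 KiB wasm blob can bump it by writing ~36 bytes.
    let ext = ExpirationExtension { key: EntryKey([0; 32]), new_expiration_ledger: 123_456 };
    println!("extend to ledger {}", ext.new_expiration_ledger);
}
```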
+ +[36:00] They don't necessarily need to always have auto bumps, but they should always be bumpable, just because they are important information that needs to be saved, which is why they are subject to being sent to expired state when they expire. Because the design parameter for this entry type is that it's supposed to be important live state, there should be no use case where you wouldn't want to allow a unique or restorable entry to be bumpable. So I think that is the most technically complete interface; it's very feasible from a core perspective. The only question is whether that's too much complexity for the end user. I think that, as you described it, if it creates more divergence between temporary entries and restorable entries, then probably, for the sake of simplicity, I'd say we shouldn't include it, especially because this is something that the contract developer can program for in + +[37:00] their contract. I will say, if we go this route (so, if I'm understanding correctly, we don't want divergence, and we'd have an auto bump flag, true or false, but we would not have the no-bump flag), we need to be very clear in our documentation that the expiration ledger is not absolute. If you have a temporary, self-deleting entry, and there's a field called expiration ledger, it's a reasonable assumption that the entry would be deleted immediately after that ledger; and if we don't have this no-bump flag, that's not the case, because a malicious user could invoke the operation and bump any temporary entry even if auto bump is disabled. I think that's a fine interface decision to make, that all entries are bumpable, but we need to make it very clear, just from a UX perspective, so that we don't have security issues with temporary entries being used improperly. + +[38:00] Any questions or comments on this? What do people think: should we have the bumpable flag? I'm just going to type a big thing, but I could actually say it. I think I would vote for not having a bumpable flag if possible, just to have fewer configuration options for the different types of storage. There are two different types of storage and they act differently; I think that's fine to explain and understand. But if you have to say, okay, there are restorable entries and they act in this way unless you enable auto bump on them, and so on for the other type, you know what I mean: there are like four different configurations now versus just two. So Paul, are you talking about the bumpable flag or the auto-bumpable flag? Sorry, yes, bumpable. Okay, + +[39:00] yeah, I think that makes sense to me; it's just that not everything is bumped. But just so I understand, are we still interested in the auto bump flag, or do we also want to nix that and just say everything auto bumps as well? I definitely don't think that everything should be auto-bumpable by default. Yeah, it does sound like, from a completeness perspective, having both of these flags covers most use cases. One thing we can do, if we don't want to expose the bumpable flag now, is just define it in the XDR and in core without exposing it in the SDK, and then turn it on in V2
if we want a more complicated UX matrix for storage later. That would at least give us future + +[40:00] proofing. Actually, if we were defining auto bump anyway, then we need to have a flags field for the auto bump flag, so we can easily extend it later. So I think bumpable versus non-bumpable could be a V2 feature; unless there's a strong want for it now, we can just leave that off and have only the auto bump flag. I'm not sure that the auto bump really benefits the network. It might benefit ease of use, but not necessarily the network itself, right? Sure, you are having more IO churn, for sure. But there didn't seem to be a great solution for the contract instance and contract wasm case without auto bump. So it + +[41:00] might be useful to say that those two entry types need something like auto bump, or need some way of automatically pooling rent together in some way. That was the thing we were trying to think about with the contract wasm: okay, you have four or five different instances that all have the same wasm backend; how do you equitably and evenly share the load when it comes to expiration ledgers? I think the direction I'm thinking about is that if someone is uploading a contract, I want to see the uploader's involvement in the future in maintaining it, and not so much leaving it and assuming that it will be funded by + +[42:00] someone else. I want to see activity on that contract from the owner, and not from someone else who might be inherently using it. The issue is that contracts don't have an owner; code can be uploaded by anyone and deployed by anyone. So if you have a credible implementation that people are using, there is no clear owner. Yes, someone might have written the code, but anyone can deploy it, and it's kind of hard to trace it to anyone and demand maintenance from someone. I mean, you could expect it, but can you universally demand it? + +[43:00] Yeah, I feel like the concept of a library contract is definitely a big thing on Ethereum, and so it feels like an unfair expectation to write something that's more of a library function and expect the library owner to be the sole payer for it, even though other contract instances are using that wasm and making money off the wasm. It just seems weird that if five different contract instances are using and profiting off a wasm, only the entity that originally deployed the wasm should be on the hook for paying for it. I would argue that if someone is using something for free, then they shouldn't be; instead you should either pay the author, who generates money out of that, or basically copy it, and from then on you would be the owner of that + +[44:00] piece. What do you mean by copy, though? I feel like that's worse from a network perspective, because now you have five identical copies of the same wasm. Yeah, the current design explicitly encourages sharing the code, because we don't want to store tens or hundreds of duplicate wasm blobs; they're the biggest ledger entries by far. So you don't want to encourage copy-pasting and fragmenting the system or anything. And again, I think I'm going in circles here, but
in Ethereum a lot of things are implemented by proxy patterns and so on; again, a lot of instances refer to, say, another single contract instance. You do not deploy your own copy of Uniswap and you do not maintain it; instead you can just refer to a proxied instance of Uniswap that is, yes, + +[45:00] probably getting updated by someone. But I don't think it's fair to say 'deploy your own thing.' Yeah, I know. I would be happy to see some incentive model incorporated here as part of that, but maybe that's something we need to think about separately. Do you have any thoughts about that? Yeah, I think we should allow contracts to be detached from the concept of an owner. For a lot of legal reasons this is better for some contracts; the Uniswap contracts, for example, are not upgradable, they don't have an owner, + +[46:00] and that's the way they want it. It exists like that for a reason, so I don't think we should over-emphasize this concept of an owner. That's a good argument. Garand, do you think it would be reasonable to constrain auto bump to the entries which are going to benefit from it the most? So not all ledger entries, not contract data, but just the contract code and the contract instances. Yeah, contract code and contract instances for sure, but the reason I wanted an auto bump flag is that I think there are also areas where contract data should be auto-bumped. For instance, if you have a DEX, and there are entries on the DEX that many users of the DEX use, the auto bump feature is just so that these entries are paid for by all users instead of + +[47:00] one unlucky user. So there are definitely use cases where you'd want something like an auto bump primitive for contract data. However, I could definitely see this not being the default: a flag where contract instances and contract wasm receive bumps by requirement, and data defaults to false, but you can turn it on if you want. I still think there are definitely use cases where you'd want something like this for contract data; I don't know if it's a common case, but there definitely are significant cases. I think it's a very common case: effectively everything in a contract that is like a global variable, or global state that doesn't specifically adhere to an account (like in a liquidity pool, where the actual pool values are global), needs to be bumped. We have been discussing this for some + +[48:00] time: what about explicitly attaching that data to the contract instance? Do we anticipate some case where the data is kind of global, but it's not once-per-contract or something? If not, then maybe we just store the small global state of the contract in the instance entry, and then it's subject to the same auto bump, obviously, because if you think about it, it's kind of a part of the contract. That's another consideration here. I guess the question is how monolithic we want to be, because the issue with that is that if you have a contract instance that has a very large amount of global state, every call must bump all of that state. Whereas, take the liquidity pool example:
say there's a pool implementation that has 10 + +[49:00] different asset swaps or something. Even if you only access one, you have to bump the other nine implicitly; whereas if you don't bundle them, and you keep everything individual, then you only have to bump the entries you actually touch. Now, in practice contracts might be small enough that this isn't really a big issue, and it definitely does simplify some things and decreases our write amplification as well, because you'd only need to write at most one bump per contract instance. So this effectively, sorry, go ahead. I think it's difficult to distinguish between global and individual, because there are definitely going to be contracts that have in-between cases, where it's not global but maybe it involves multiple participants. So I don't know where we'd draw the line between something being a shared cost versus + +[50:00] being an individual cost; it seems too difficult to do that. Yeah, other than the really general case that contract code should definitely be a shared cost, for contract data I don't think we can make that call; I think the contract developers should. I personally think the safest option is just to default this to true and then maybe expose a flag, just because the drawback of auto bumping something when it shouldn't be auto-bumped is very minimal. The whole idea is for the bump to be a small bump, such that if lots of users are accessing it, over time it grows. For instance, if you have a balance and you say 'view balance' and you have to pay 10 extra ledgers of rent on that one entry, that might be less than an XLM; it'd be measured in stroops. Whereas the drawback of not having auto bumps can be high: if you + +[51:00] have a shared global entry that you forget to bump, then for the entire lifetime of that contract you have to manually bump it, and that's expensive. I feel like that's a much worse failure case than the other case, where you sometimes have to pay a small additional fee to access some entries. Leaning into that, is there really a big downside to requiring auto bump, that is, not having the option, not making it configurable? Oh, you mean just requiring it universally? I personally think there's maybe one negative use case: we have oracle data that should really only live for five or ten minutes. It would be really annoying, especially if this data is accessed often in a DEX sort of environment, if you accessed this thing so much within its five-minute lifetime that it always ends up living for six hours or + +[52:00] two weeks afterwards, even though it should only live five minutes. So for very short-lived entries and oracle data there's a strong use case; outside of that, I don't see a super strong use case for not just requiring auto bumps everywhere. Are you suggesting setting it to true for every type of entry, or just for restorable entries? Because for the oracle use case that you're describing, Garand, wouldn't they use temporary entries for that? Yeah, yes, I think so.
I think right now we are talking about a unified interface where both restorable and temporary entries have the same expiration ledger interface, and both have the same auto bumps. Yeah, I feel like contracts can avoid the issue you're describing, where you accidentally continue to re-bump a temporary entry, by, I guess, moving on to a new + +[53:00] entry once the temporary one's five minutes are up. How would people be referencing it an hour from now? Well, no, that's the issue: no one is referencing it now, but in those five minutes enough people referenced it that it was auto-bumped so much that it will live significantly longer than five minutes. The issue is that no one will access it within an hour, but it will live an hour, two hours, or however long. So, I guess: what if auto bumping didn't bump it a fixed amount; what if it bumped up to some cap? I mean, it does bump up to some cap, because we have a maximum rent balance, or a maximum expiration ledger, due + +[54:00] to the issues we discussed earlier. So there is a cap; it's just that the cap will make sense for some entries but not for others. For instance, if the cap is six months, in the extreme case you have an oracle entry that was supposed to last five minutes lasting up to the cap of six months, and that's a lot of wasted fees. Okay, so it sort of feels, go ahead, sorry. This is the last thing I'm going to say on this: we could argue that auto bump should be configurable per entry, but it also sounds like the amount by which this thing should be auto-bumped, if we're making it configurable, is not really a binary yes or no; it's more like an amount. + +[55:00] As a contract developer, the contract developer is probably the person who's going to know best how long, if this is going to get bumped, it needs to be bumped for. Or I guess it's some combination of the contract developer and the user. Yeah, I'm just a little skeptical of allowing contracts to define access-fee amounts, because if a contract, even if it's not malicious, just a stupid design, says, hey, this should have six months, and it's a 10-kilobyte entry that requires a six-month bump just to access, that's a pretty poor UX. Yeah, this is just the slippery slope of configurability. In some ways, maybe, to Paul's point around not making things configurable, + +[56:00] maybe this is the trade-off we make: okay, temporary entries might get bumped more than they need to, they might live a little bit longer, but that's the trade-off, for all these other reasons. Okay, so we're at time. It definitely sounds like everyone's on board with expiration ledgers, moving on from rent balances. There are the questions of which flags we expose, and there is the question that Dima raised about revisiting the idea of contract-attached state, which we decided against in the context of metadata, but it wasn't a landslide of + +[57:00] opinions there, so maybe it's worth rethinking that. So again, it does sound like you have a lot to work with right now
in terms of ledger expiration. I will try to summarize this question about which flags to expose that we've just debated, and start a discussion around it to give people some asynchronous time to think about it. And Dima, if you could resurface this idea of contract-attached state and the benefits of it, both in this context and in others, I think that could be beneficial. Garand, is there anything else? No, that sounds good. Okay, Dima, I saw you unmuted for a second there. Yeah, I just want to say that, + +[58:00] yeah, when I have some time; right, I guess that's my idea with the contract-attached data. I don't know if we have enough time to get it shipped into V1 even if everyone is on board, but I need to think a bit more about this. Awesome, okay, thank you everyone, it's been a great session. See y'all next week. + +
diff --git a/meetings/2024-01-18.mdx b/meetings/2024-01-18.mdx new file mode 100644 index 0000000000..9b87ddacb7 --- /dev/null +++ b/meetings/2024-01-18.mdx @@ -0,0 +1,50 @@ +--- +title: "BLS12-381 Zk Curve Use Cases" +description: "This overview covers whether and how zk-enabling curves such as BLS12-381 should be supported in Soroban smart contracts." +authors: + - alejo-mendoza + - alex-mootz + - dmytro-kozhevin + - ernesto-contreras + - graydon-hoare + - leigh-mcculloch + - morgan-wilde + - naman-kumar + - plamen-hristov + - rohit-sinha + - tomer-weller +tags: [developer] +--- + +import AudioPlayer from "@site/src/components/AudioPlayer"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/1196897067445010452) + +This discussion focused on whether and how zk-enabling encryption curves—specifically `BLS12-381`—should be supported for Soroban smart contracts. Participants explored concrete ecosystem use cases and weighed the technical tradeoffs involved in exposing cryptographic primitives at the host function level. + +The conversation balanced developer demand for flexibility in emerging zk systems against concerns around performance, abstraction level, and long-term crypto agility. It also touched on practical development workflows, frontend transaction behavior, and gaps in current RPC visibility. + +### Key Topics + +1. The need for zk-enabling encryption curves like `BLS12-381` +2. Use cases that the ecosystem is interested in: + 1. Excellar (the folks who kicked off this conversation by submitting a [PR for BLS12-381](https://github.com/stellar/rs-soroban-env/pull/1310)) wants to add a DAO-controlled oracle, where the elliptic curve provides the ability to add new DAO voters + 2. ZkBricks wants to build an L2 system that enables secret state for arbitrary smart contracts + 3. Skyhitz wants to use Stellar for efficient compute, cost, and scalability while using zk to prove ownership of high-value assets on another chain + 4. Use case enumeration continues in the [Discord thread](https://discord.com/channels/897514728459468821/1197663875512942653) +3. Considerations for host function implementation + 1. Core devs questioned whether BLS12-381 was the right curve and also highlighted the need to determine the correct level of abstraction, given the tradeoff between flexibility and efficiency. A lower level of abstraction enables more flexibility but results in more hot loops in the wasm, while a higher level of abstraction is highly efficient but restricts generality. + 2. ZkBricks thought that there is a need to directly expose pairings and group operations without any level of abstraction (see the sketch after this list). The space is in active development and flexibility is needed to try out new approaches and proof systems. From the point of view of crypto agility, it would be good to expose a generic interface that supports a variety of curves in the backend. +4. Path Forward + 1. Core devs mentioned crypto curves can be experimented with locally by linking Rust crates, which, it turns out, had failed in the past. This will be explored and fixed. + 2. ZkBricks and others will prototype locally and provide feedback. +5. What are the best practices for managing transactions in the frontend with respect to transaction ordering? +6. Core devs confirmed that ordering is intentionally arbitrary. +7. Request for an API for the current version of the environment/SDK +8. [GitHub issue](https://github.com/stellar/stellar-rpc/issues/10) filed for the RPC to return versions of the current node.
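To make the "expose pairings and group operations directly" position concrete, here is a hedged sketch of a BLS signature check built from two raw pairings, written against the zkcrypto `bls12_381` crate (choosing that crate is an assumption made for illustration). Hashing the message to the curve is omitted and assumed done elsewhere.

```rust
// Sketch only: the kind of low-level pairing primitive discussed above,
// expressed with the zkcrypto `bls12_381` crate. Public keys live in G1
// and signatures in G2 (sig = sk * H(m)); hash-to-curve is omitted.
use bls12_381::{pairing, G1Affine, G2Affine};

/// A signature is valid iff e(pk, H(m)) == e(g1, sig), since
/// e(sk * g1, H(m)) == e(g1, sk * H(m)) by bilinearity.
fn bls_verify(pk: &G1Affine, msg_point: &G2Affine, sig: &G2Affine) -> bool {
    pairing(pk, msg_point) == pairing(&G1Affine::generator(), sig)
}

fn main() {
    // With pairings exposed as host primitives, a contract could run this
    // check on-chain; here we only exercise the degenerate sk = 1 case.
    let valid = bls_verify(
        &G1Affine::generator(),
        &G2Affine::generator(),
        &G2Affine::generator(),
    );
    println!("degenerate check (sk = 1): {valid}");
}
```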
+ +### Resources + +- [BLS12-381 Discussion](https://github.com/stellar/rs-soroban-env/issues/779) diff --git a/meetings/2024-01-26.mdx b/meetings/2024-01-26.mdx new file mode 100644 index 0000000000..e4d7ee3853 --- /dev/null +++ b/meetings/2024-01-26.mdx @@ -0,0 +1,39 @@ +--- +title: "Fee Bump Testing and Contract Discovery" +description: "This overview summarizes discussions on a fee bump bug, Horizon API changes, testing workflows, and approaches for discovering Soroban smart contracts." +authors: [john-wooten, kalepail, tomer-weller] +tags: [developer] +--- + +import AudioPlayer from "@site/src/components/AudioPlayer"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/1199121845656486009) + +This developer call covered several operational and ecosystem topics affecting Soroban developers and validators, with particular focus on a recently disclosed fee bump bug and its implications for upgrades. Participants also discussed API deprecations, testing challenges, and practical questions around smart contract discovery. + +The conversation balanced short-term operational concerns—such as validator upgrade timing and Horizon behavior—with longer-term developer experience improvements, including better tooling for testing against historical ledger state and clearer paths for observing deployed contracts. + +### Key Topics + +1. Plan and schedule for these meetings + 1. Protocol meetings every other Thursday at 4pm ET + 2. Developer meetings every other Friday at 1pm ET + 3. Will continue to adjust as needed +2. Fee bump bug — [discussion thread](https://discord.com/channels/897514728459468821/1200432249594707998/1200432306314281000) + 1. Fee sponsorship bug: unused fee is refunded to the inner tx source rather than the sponsor source. + 2. Fix is in a new release. Up to the ecosystem and validators to upgrade. The fix will likely be rolled out before Phase 2. + 3. Up to validators to determine if they'd like to push the v20 upgrade date to wait for the fix, or upgrade with the current release. +3. TxMeta Deprecation in Horizon — [announcement](https://discord.com/channels/897514728459468821/900374272751591424/1199438109796999298) +4. Ideas around testing against ledger snapshots — [request](https://discord.com/channels/897514728459468821/1199121845656486009/1199158421254049912) + 1. Define the needs a bit more clearly + 2. Definitely something here we should be addressing to make testing against specific ledger state easier +5. How do you get a list of smart contracts? — [thread](https://discord.com/channels/897514728459468821/1199121845656486009/1199739331078803496) + 1. Observe create contract ops as ledgers close + 2. Use an [indexing service](/docs/data/indexers) +6. What is the status of contracts caching support? — [question](https://discord.com/channels/897514728459468821/1199121845656486009/1200484710447587490) | [response](https://discord.com/channels/897514728459468821/1199121845656486009/1200516877680644276) + +### Resources + +- [Fee-bump bug disclosure announcement](https://stellar.org/blog/developers/fee-bump-bug-disclosure) diff --git a/meetings/2024-02-01.mdx b/meetings/2024-02-01.mdx new file mode 100644 index 0000000000..2c8a94cb51 --- /dev/null +++ b/meetings/2024-02-01.mdx @@ -0,0 +1,42 @@ +--- +title: "Secp256r1 WebAuthn Host Function" +description: "Discussion on adding a secp256r1 verification host function to Soroban to enable WebAuthn-style authentication, improve UX for wallets, and bridge off-chain auth with on-chain logic."
+authors: + - graydon-hoare + - kalepail + - leigh-mcculloch + - naman-kumar + - nicolas-barry + - rohit-sinha + - tomer-weller +tags: [developer, CAP-51] +--- + +import AudioPlayer from "@site/src/components/AudioPlayer"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/1201979721211203614) + +This session explored a proposal to add a Soroban host function for verifying `secp256r1` signatures, primarily to support WebAuthn-style authentication flows. The motivation is to better connect off-chain authentication (passkeys, phones, platform authenticators) with on-chain authorization, improving usability for end-user applications like wallets. + +The discussion focused on scope (verification vs recovery), developer experience implications, missing primitives needed for WebAuthn payloads, and how such functionality fits Soroban's "batteries included" philosophy. + +### Key Topics + +- The proposal is to advance `stellar-core` by adding a host function to verify `secp256r1` signatures; `secp256r1` is the most common elliptic curve used outside of the blockchain space. It is useful in connecting off-chain authentication interfaces with on-chain functionality. +- Note that the proposal is not for a new signer type but for a host function. +- Leigh investigated adding support for the WebAuthn use case by allowing a custom account / smart contract to sign Soroban auth entries using a secp256r1-signed payload. +- `secp256r1` is supported by phones and passkeys, and enables an app to replace passwords. This is a massive benefit to user-facing applications like wallets. +- Pros and cons of the interface: blockchains generally implement the recovery interface over the verification interface, but verification is easier for developers as it reduces the burden on the client and the network. +- The WebAuthn use case requires encoding and decoding of `base64` payloads and decoding JSON blobs, which is not currently supported in Soroban. +- While there are hacky ways of accomplishing the latter, it's not a great developer experience, and the final implementation is susceptible to breakages on updates. +- It is also costly to bundle decoding with verification in the guest. +- Soroban has always led with a batteries-included mindset. Keeping in line with that approach, it makes sense to further investigate and determine whether host functions make sense for these as well. +- Leigh's implementation may require further evaluation of the crates used for ECDSA and `p256`. +- Brief discussion around the proposed process for adding a host function as a non-core dev. + +### Resources + +- [GitHub Discussion](https://github.com/orgs/stellar/discussions/1435) +- [CAP-0051: Secp256r1 Verification](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0051.md) diff --git a/meetings/2024-02-08.mdx b/meetings/2024-02-08.mdx new file mode 100644 index 0000000000..40915ec607 --- /dev/null +++ b/meetings/2024-02-08.mdx @@ -0,0 +1,41 @@ +--- +title: "SEP-42 Stellar Asset Lists & Ecosystem Primitives" +description: "Discussion of SEP-42 Stellar Asset Lists: goals, structure, and how token lists can serve wallets, dapps, and ecosystem discovery alongside broader ecosystem primitives and documentation feedback."
+authors: + - elliot-voris + - kalepail + - naman-kumar + - orbitlens +tags: [developer, SEP-42] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/1204462856037470248) + +SEP-0042 introduces a standardized Stellar Asset List format intended to make asset discovery, curation, and presentation more consistent across the ecosystem. This meeting captures a community walkthrough of the draft along with broader discussion about ecosystem primitives and how developers experience Stellar + Soroban documentation today. + +The session centered on what a token/asset list should contain, how lists might be curated and consumed (wallets, explorers, dapps), and where the ecosystem needs clearer conventions so that different clients can interoperate without reinventing metadata patterns. + +### Key Topics + +- SEP-42 Stellar Asset Lists overview + - Motivation for a common, portable asset-list format + - How lists can improve asset discovery, display, and trust signals in clients + - Considerations for list hosting, versioning, and update workflows +- Ecosystem primitives enabled by standardized lists + - Reusable metadata patterns for wallets and dapps + - Interoperability expectations across clients consuming the same list format + - How curated lists and community discussion can reduce duplicated effort +- Developer feedback loop + - Documentation survey promoted to gather input on Stellar + Soroban docs + - Using community feedback to identify gaps and prioritize improvements + +### Resources + +- [Stellar Asset List (SEP-0042)](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0042.md) +- [SEP draft](https://github.com/orbitlens/stellar-protocol/blob/sep-0042-token-lists/ecosystem/sep-0042.md) +- [Discord discussion](https://discord.com/channels/897514728459468821/1162558946867953704) +- [Take the documentation survey](https://discord.com/channels/897514728459468821/1204462856037470248/1205196745877757962) diff --git a/meeting-notes/2024-02-15.mdx b/meetings/2024-02-15.mdx similarity index 55% rename from meeting-notes/2024-02-15.mdx rename to meetings/2024-02-15.mdx index 89ecc5dbb1..846dbed512 100644 --- a/meeting-notes/2024-02-15.mdx +++ b/meetings/2024-02-15.mdx @@ -1,31 +1,40 @@ --- -title: "2024-02-15" -authors: naman -tags: [protocol] +title: "WebAuthn Encoding Host Functions" +description: "Discussion on adding Base64 and JSON encoding/decoding host functions to support WebAuthn in Soroban, including performance tradeoffs, interface design, maintainability concerns, and next steps." +authors: + - dmytro-kozhevin + - graydon-hoare + - kalepail + - leigh-mcculloch + - naman-kumar +tags: [developer, CAP-52, CAP-53] --- - +import DriveVideo from "@site/src/components/DriveVideo"; -[Discord agenda thread](https://discord.com/channels/897514728459468821/1207385360116490360) + + +## [Public Discussion](https://discord.com/channels/897514728459468821/1207385360116490360) + +This meeting continued the WebAuthn discussion by focusing on the process and implications of adding new host functions, using WebAuthn as a concrete motivating use case. The core question was whether Soroban should provide built-in encoding and decoding primitives—specifically Base64 and JSON—to enable secure and maintainable WebAuthn verification flows. + +The group examined performance characteristics, interface scope, and long-term maintenance risks of expanding the host-function surface area. 
While WebAuthn remains a compelling use case, the discussion emphasized carefully balancing developer enablement with Soroban's stability, security, and sustainability goals. + +### Key Topics 1. The meeting was focused on the process of adding host functions, using WebAuthn as the example use case; continued from the previous meeting. 2. Discussion of remaining concerns with adding the secp256r1 verification host function from the previous meeting. - What does it mean for secp256r1 to be added as a host function vs. as a signer type? - - As a host function, user can sign soroban auth entries. Need another stellar account to fund and submit tx to the chain. The latter can be done by a stellar account which may be operated by a wallet or a contract. + - As a host function, user can sign soroban auth entries. Need another Stellar account to fund and submit tx to the chain. The latter can be done by a Stellar account which may be operated by a wallet or a contract. - \_\_check_auth is invoked when the contract being interacted with calls require_auth -3. CAP-52 was drafted to introduce encoding/decoding functions for Base64, which is needed by WebAuthN. Considerations discussed in the meeting: +3. [CAP-0052](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0052.md) was drafted to introduce encoding/decoding functions for Base64, which is needed by WebAuthn. Considerations discussed in the meeting: - Performance: 1066 bytes that cost 1M instructions to encode a 32-byte hash; the cost is very small, and it's questionable whether a host function is required. - Interface would require two functions (encode/decode); a sketch follows after this section - Implementation-wise, WebAuthn requires the URL alphabet and padding, which the decoder likely needs to support. Should we use symbols or ints? Do we need custom alphabets? - Do we really need more encoding schemes? Isn't XDR enough? - Expensive auth mechanisms, i.e., WebAuthn, cannot be coupled with contracts with heavy business logic (which might be a lot of contracts), thus making adoption problematic. - We should probably add building blocks to enable the ecosystem to add new use cases. -4. CAP-53 was drafted to introduce encoding/decoding functions for JSON, which is needed by WebAuthN. Considerations discussed in the meeting: +4. [CAP-0053](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0053.md) was drafted to introduce encoding/decoding functions for JSON, which is needed by WebAuthn. Considerations discussed in the meeting: - Performance: 3.9 KB, 2.5M CPU instructions. - If the size of the input blob is unknown, execution time will increase. - Valuable to have such a lightweight function that'll be used in various places. @@ -41,3 +50,8 @@ tags: [protocol] - Next steps: - Core team to put together a plan for adding Base64. This is an important exercise that helps determine even more challenges of doing so. The output of this exercise may be that base64 _shouldn't_ in fact be implemented at this point. - Discussion around the JSON interface is to be continued.
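For illustration, a hedged sketch of the two-function Base64 interface discussed under CAP-0052; the names, the alphabet enum, and the padding switch are assumptions, not the CAP's actual definitions.

```rust
/// Hypothetical host-interface sketch for CAP-0052-style Base64 support.
/// WebAuthn payloads use the URL-safe alphabet, typically without padding.
enum Base64Alphabet {
    Standard, // RFC 4648, section 4
    UrlSafe,  // RFC 4648, section 5 (what WebAuthn's clientDataJSON uses)
}

trait Base64Host {
    /// Encode raw bytes into Base64 text in the chosen alphabet.
    fn base64_encode(&self, data: &[u8], alphabet: Base64Alphabet, pad: bool) -> Vec<u8>;
    /// Decode Base64 text, surfacing an error on malformed input.
    fn base64_decode(&self, text: &[u8], alphabet: Base64Alphabet, pad: bool)
        -> Result<Vec<u8>, ()>;
}
```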
+ +### Resources + +- [CAP-0052: Base64 encoding/decoding](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0052.md) +- [CAP-0053: JSON encoding/decoding](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0053.md) diff --git a/meetings/2024-02-22.mdx b/meetings/2024-02-22.mdx new file mode 100644 index 0000000000..f715446c8f --- /dev/null +++ b/meetings/2024-02-22.mdx @@ -0,0 +1,38 @@ +--- +title: "RPC Tooling Highlights" +description: "Overview of recent Soroban RPC tooling updates, focusing on TypeScript bindings, RPC providers, local RPC setup, and deploying Stellar Asset Contracts using Phase 0 flows." +authors: [chad-ostrowski, kalepail, naman-kumar] +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/1209582245824823337) + +This session highlighted recent improvements and practical guidance around Soroban RPC tooling, with an emphasis on developer workflows. The discussion walked through how developers can more easily interact with Soroban using modern TypeScript bindings and clarified options for running or choosing RPC infrastructure. + +The conversation also covered hands-on operational topics, including standing up a local `stellar-rpc` instance and deploying a Stellar Asset Contract on mainnet under the current Phase 0 constraints, helping developers understand what is possible today and how to get started. + +### Key Topics + +- TypeScript bindings + - Updates and current state of the Soroban TypeScript SDK + - Improved ergonomics for interacting with Soroban RPC + - Practical examples and guidance from ongoing development work +- RPC infrastructure options + - Overview of available Soroban RPC providers + - Tradeoffs between managed providers and self-hosted setups +- Running `stellar-rpc` locally + - Using the official Docker image to stand up an RPC instance + - Common setup patterns for local development and testing +- Stellar Asset Contract deployment (Phase 0) + - Installing and invoking a Stellar Asset Contract on mainnet + - Constraints and expectations under Phase 0 + - How RPC tooling fits into real-world deployment workflows + +### Resources + +- [Available RPC providers](/docs/data/apis/rpc/providers) +- [Standing up a `stellar-rpc` docker container](/docs/data/apis/rpc/admin-guide/installing#docker-image) diff --git a/meetings/2024-02-29.mdx b/meetings/2024-02-29.mdx new file mode 100644 index 0000000000..6a2adff537 --- /dev/null +++ b/meetings/2024-02-29.mdx @@ -0,0 +1,55 @@ +--- +title: "Extended TTL Host Environment Proposal" +description: "Discussion of a proposed host environment change to support separate TTL extensions for contract instances and code, improving cost efficiency while aligning with existing protocol mechanics." +authors: + - alex-mootz + - dmytro-kozhevin + - leigh-mcculloch + - naman-kumar + - tommaso-de-ponti +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/1212118102565855243) + +This session focused on a proposed update to the Soroban host environment that allows contract instances and contract code to have their TTL (time-to-live) extended independently. The motivation is to enable more cost-efficient patterns, particularly for contracts where code is long-lived but instances may need more frequent or shorter-lived extensions. 
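As a rough sketch of what independent extension could look like from a contract, under the assumption of soroban-sdk-style deployer APIs (`extend_ttl_for_contract_instance` and `extend_ttl_for_code` mirror the names this functionality later took in the SDK, but treat the exact signatures as assumptions):

```rust
// Sketch: extending instance and code TTLs with *separate* values, which is
// the core of the proposal. Assumes soroban-sdk-style APIs.
#![no_std]
use soroban_sdk::{contract, contractimpl, Address, Env};

#[contract]
pub struct Keeper;

#[contractimpl]
impl Keeper {
    /// Bump the instance entry often and cheaply, but pay once for a much
    /// longer code (wasm) lifetime, instead of one blended TTL for both.
    pub fn keep_alive(env: Env, target: Address) {
        let deployer = env.deployer();
        // Instance: if fewer than 1,000 ledgers remain, extend to 50,000.
        deployer.extend_ttl_for_contract_instance(target.clone(), 1_000, 50_000);
        // Code: if fewer than 1,000 ledgers remain, extend to 500,000.
        deployer.extend_ttl_for_code(target, 1_000, 500_000);
    }
}
```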
+ +The proposal was reviewed from both a design and implementation perspective, with discussion around scope, metering impact, and alignment with existing TTL mechanisms. Feedback from core developers and the ecosystem indicated strong support and low implementation risk. + +### Key Topics + +- Proposal overview + - Extend host environment support to allow separate TTL values for: + - contract instances + - contract code (WASM) + - Enables developers to avoid overpaying for TTL when instance and code lifetimes differ +- Motivation and benefits + - More granular and cost-efficient TTL management + - Better alignment with real-world contract usage patterns + - Reduces unnecessary rent costs for long-lived code or short-lived instances +- Implementation considerations + - Change scoped to the host environment + - Does not require changes to metering + - Viewed by core developers as a relatively quick and low-risk update +- Ecosystem feedback and consensus + - Proposal discussed publicly via GitHub Discussions + - Ecosystem participants expressed support through upvotes + - 13 votes recorded in favor of moving forward +- Next steps + - Formalize the proposal in a CAP + - Submit for review and approval by the CAP Core Team + +### Outcomes + +1. [Tommaso](https://discord.com/users/905908451446779965) proposed a core change to allow extending instance and code TTL with separate values on the host environment, enabling more cost-efficient designs. +2. Tommaso received feedback on the proposal as well as implementation. Since it didn't require a metering change, core devs thought it to be a quick change. +3. The ecosystem voted in favor of the proposal by upvoting the post on GitHub Discussions 13 times. +4. As next steps, a CAP will be authored to capture the proposal and put forth for approval from the CAP Core Team. + +### Resources + +- [GitHub Discussion: Extended TTL host environment proposal](https://github.com/stellar/stellar-protocol/discussions/1447) diff --git a/meetings/2024-03-07.mdx b/meetings/2024-03-07.mdx new file mode 100644 index 0000000000..b3c600abad --- /dev/null +++ b/meetings/2024-03-07.mdx @@ -0,0 +1,43 @@ +--- +title: "Sorobill Cost Visibility Tool" +description: "Overview of the Sorobill cost visibility tool, demonstrating how developers can inspect Soroban contract costs, decode XDR, analyze transaction limits, and understand protocol-level behavior." +authors: [chad-ostrowski, dmytro-kozhevin, kalepail] +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/911254664576643122/1215404506964172890) + +This session introduced and walked through **Sorobill**, a developer tool designed to make Soroban contract costs and limits more visible and understandable. The discussion focused on how developers can move beyond opaque failures and instead see exactly which protocol limits a contract invocation is hitting. + +Using real examples, the presenters demonstrated how Sorobill integrates into local development workflows, helps decode XDR, and provides actionable insight into why transactions fail or become expensive.
+ +### Key Topics + +- Sorobill cost visibility tool + - Overview of the Sorobill CLI/package and its goals + - Inspecting Soroban contract execution costs in a developer-friendly way +- Measuring contract limits and costs + - Understanding which limits (CPU, memory, ledger reads/writes) a contract touches + - Capturing a snapshot of all limits exercised during an invocation + - Using this data to guide contract optimization +- Local development workflow + - Deploying contracts and testing invocations against the unlimited quickstart environment + - Using an unconstrained setup to explore worst-case behavior safely +- XDR decoding and failure analysis + - Decoding transaction XDR to inspect low-level execution details + - Using Sorobill to understand why transactions failed + - Bridging the gap between protocol-level errors and developer intent +- Practical debugging and optimization + - Identifying cost drivers early in development + - Using visibility tooling to avoid surprises when moving toward constrained networks + +### Resources + +- [Sorobill tool](https://github.com/kalepail/sorobill) +- [Stellar quickstart](https://github.com/stellar/quickstart) +- [Relevant blog post: “Show Me the Bill (Part 2)”](https://kalepail.com/blockchain/show-me-the-bill-part-2) diff --git a/meetings/2024-03-14.mdx b/meetings/2024-03-14.mdx new file mode 100644 index 0000000000..674b332bf2 --- /dev/null +++ b/meetings/2024-03-14.mdx @@ -0,0 +1,63 @@ +--- +title: "CAP Proposals for Soroban Costs" +description: "Review of multiple CAP proposals focused on Soroban cost reduction, host functionality, and TTL management, as discussed with the CAP Core Team." +authors: + - david-mazieres + - dmytro-kozhevin + - graydon-hoare + - leigh-mcculloch + - naman-kumar + - nicolas-barry + - tommaso-de-ponti +tags: + - developer + - CAP-51 + - CAP-53 + - CAP-54 + - CAP-55 + - CAP-56 +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/1217193723612368926) + +This session captured a CAP Core Team review of several proposals aimed at improving Soroban’s cost model, performance, and developer ergonomics. Proposal authors presented their work, explained motivations and tradeoffs, and received direct feedback from the Core Team. + +The discussion emphasized reducing overall execution costs, improving cost accuracy, and carefully expanding host functionality, while maintaining protocol stability. The CAP Core Team noted they would deliberate and deliver final votes asynchronously.
+
+### Key Topics
+
+- CAP review process
+  - CAP Core Team (including Nicolas and David) reviewed ecosystem-submitted proposals
+  - Authors presented rationale, scope, and implementation details
+  - Core Team feedback focused on correctness, cost impact, and long-term maintainability
+  - Final decisions to be communicated via email
+- Cryptography and host functionality
+  - `CAP-0051` by `@leigh`: add support for `secp256r1` verification as a host function
+    - Enables WebAuthn-style authentication and broader cryptographic interoperability
+    - Discussed scope and implications for Soroban authorization
+- TTL and lifecycle management
+  - `CAP-0053` by `@tdep`: separate functions for extending TTL of contract instance and contract code
+    - Allows more granular and cost-efficient lifetime management
+    - Avoids overpaying for long-lived code when instances change frequently
+- Soroban cost model refinements
+  - `CAP-0054` by `@graydon`: refine VM instantiation costs
+    - Breaks the monolithic VM instantiation cost into more accurate components
+    - Aims to better reflect actual resource usage
+  - `CAP-0055` by `@graydon`: reduce costs by linking fewer host functions during VM instantiation
+    - Lowers overhead for contracts that do not use the full host function surface
+  - `CAP-0056` by `@graydon`: cache parsed Wasm modules within a transaction
+    - Avoids repeated parsing when multiple invocations reference the same Wasm
+    - Improves performance and reduces redundant computation
+
+### Resources
+
+- [Official developer mailing list approvals](https://groups.google.com/g/stellar-dev/c/6MKQSn22H8c)
+- [CAP-0051: secp256r1 verification](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0051.md)
+- [CAP-0053: separate TTL extension for instance and code](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0053.md)
+- [CAP-0054: refined Soroban VM cost model](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0054.md)
+- [CAP-0055: reduce host function linking costs](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0055.md)
+- [CAP-0056: cache parsed Wasm modules](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0056.md)
diff --git a/meetings/2024-03-21.mdx b/meetings/2024-03-21.mdx
new file mode 100644
index 0000000000..e683fef1c6
--- /dev/null
+++ b/meetings/2024-03-21.mdx
@@ -0,0 +1,57 @@
+---
+title: "TX Metadata and Contract Discovery"
+description: "Discussion on improving transaction metadata for analytics and contract discovery, including read-only contract invocations, standardized contract metadata, and RPC visibility into inclusion fees."
+authors:
+  - jane-wang
+  - kalepail
+  - leigh-mcculloch
+  - morgan-wilde
+  - naman-kumar
+  - siddharth-suresh
+  - sydney-wiseman
+tags: [developer]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+## [Public Discussion](https://discord.com/channels/897514728459468821/1219381314931917000)
+
+This session focused on improving visibility and introspection around Soroban transactions and smart contracts. The discussion explored how richer transaction metadata can unlock better analytics, tooling, and developer understanding of on-chain behavior without changing contract semantics.
+
+A major theme was contract discovery: making it easier to identify, authenticate, and reason about deployed contracts by standardizing metadata and exposing it through existing and future RPC interfaces.
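+
+One concrete mechanism that comes up in the Key Topics below is embedding metadata in the contract binary itself. As a minimal sketch (the key/value strings here are hypothetical), soroban-sdk's `contractmeta!` macro writes entries into the Wasm at build time, where indexers and explorers can read them:
+
+```rust
+use soroban_sdk::{contract, contractimpl, contractmeta, symbol_short, Env, Symbol};
+
+// Hypothetical metadata entries baked into the compiled Wasm so tooling
+// can identify the contract and trace it back to its source.
+contractmeta!(key = "Description", val = "Example contract for discovery demos");
+contractmeta!(key = "Source", val = "https://github.com/example/contract");
+
+#[contract]
+pub struct Discoverable;
+
+#[contractimpl]
+impl Discoverable {
+    pub fn name(_env: Env) -> Symbol {
+        symbol_short!("example")
+    }
+}
+```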
The group also connected these ideas to fee mechanics, emphasizing transparency around inclusion fees and trade-offs. + +### Key Topics + +- Transaction metadata enhancements + - Proposal to expand TX meta to improve visibility and analytics + - How richer metadata can support explorers, indexers, and developer tooling +- Read-only contract invocations + - Explanation of read-only calls that guarantee no state changes + - Motivation for safely querying contract state and behavior + - Ongoing protocol discussions on enforcement and semantics +- Contract discovery + - Motivation to improve visibility and authenticity of deployed contracts + - Need to help developers and users identify what a contract is and where it came from +- Standardized contract metadata + - Proposal to adopt a common contract metadata schema + - Linking deployed contracts to source code and descriptive information + - Use of `contractmeta` to embed metadata at build time +- Fees and inclusion cost visibility + - Discussion of inclusion fees and surge pricing behavior + - Importance of monitoring fees and understanding trade-offs in transaction submission +- RPC improvements + - Early plans for a new RPC endpoint + - Goal: provide better guidance and observability around inclusion fees + - Enable developers to make informed decisions when setting fees + +### Resources + +- [TX meta change PR](https://github.com/stellar/stellar-xdr/pull/175) +- Read-only invocations: + - [Discussion 1](https://github.com/stellar/stellar-protocol/discussions/1454) + - [Discussion 2](https://github.com/stellar/stellar-protocol/discussions/1456) + - [Discussion 3](https://github.com/stellar/stellar-protocol/discussions/1464) +- [`contractmeta` macro documentation](https://docs.rs/soroban-sdk/latest/soroban_sdk/macro.contractmeta.html) +- [Surge pricing and inclusion fees](/docs/learn/fundamentals/fees-resource-limits-metering#inclusion-fee) diff --git a/meetings/2024-03-28.mdx b/meetings/2024-03-28.mdx new file mode 100644 index 0000000000..6374943321 --- /dev/null +++ b/meetings/2024-03-28.mdx @@ -0,0 +1,49 @@ +--- +title: "SEP Process and Asset List Feedback" +description: "Discussion on proposed updates to the SEP process and ecosystem feedback from implementing SEP-42 Stellar Asset Lists." +authors: + - esteban-iglesias + - jake-urban + - kalepail + - naman-kumar +tags: [developer, SEP-42] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://github.com/stellar/stellar-protocol/discussions/1475) + +This session focused on two closely related topics: proposed changes to the Stellar Ecosystem Proposal (SEP) process, and practical feedback from teams implementing SEP-42 Asset Lists. Together, these discussions highlighted how standards evolve in practice and how process design can better support ecosystem participation. + +The conversation emphasized decentralizing standards development, lowering barriers for contributors, and incorporating real-world implementation feedback into the evolution of SEPs. 
+
+### Key Topics
+
+- Updates to the SEP process
+  - Proposed changes from the Standards Working Group
+  - Goal of making the SEP process more decentralized and ecosystem-driven
+  - Evidence of success: the updated process has already been used for multiple proposals
+- SEP-42 Asset List implementation feedback
+  - Real-world experience from Soroswap implementing SEP-42
+  - Lessons learned from drafting and adopting the Asset List standard
+  - How implementation feedback informs improvements to the SEP
+- Ongoing discussion and iteration
+  - Feedback and refinements continuing directly in the SEP proposal document
+  - Emphasis on open, asynchronous discussion via GitHub
+- Next steps
+  - Gather additional ecosystem feedback
+  - Update the GitHub SEP repository to reflect the refined SEP process
+
+### Outcomes
+
+1. The Standards Working Group proposed changes to the SEP process that empower the ecosystem by making the current process more decentralized and ecosystem-friendly.
+2. The process has already been used for several proposals over the last three months.
+3. Esteblock from Soroswap shared their journey of participating in the proposal for Asset Lists and implementing the proposed standard.
+4. The next step is to gather further ecosystem feedback and then update the GitHub SEP repository with the refined SEP process.
+
+### Resources
+
+- [Proposal document](https://docs.google.com/document/d/1Z8FIx5zfhrbYJdCvCotnLDgr6OYxBLxqlf-bL4BbpMQ/view)
+- [SEP-42: Stellar Asset Lists](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0042.md)
diff --git a/meetings/2024-04-04.mdx b/meetings/2024-04-04.mdx
new file mode 100644
index 0000000000..3a51d9b0b5
--- /dev/null
+++ b/meetings/2024-04-04.mdx
@@ -0,0 +1,57 @@
+---
+title: "Wallet Standard and CubeSigner Demo"
+description: "Discussion of a proposed Wallet Standard for Stellar, alongside a CubeSigner demo showcasing hardware-backed key management and low-latency transaction signing."
+authors:
+  - morgan-wilde
+  - naman-kumar
+  - piyal-basu
+  - riad-wahby
+tags: [developer]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+## Part 1
+
+
+
+:::note
+
+Today's recording has two parts. The first 12 minutes are audio-only; the next 45 minutes have video as well. Please note that, due to technical difficulties, the slides were shared in Discord chat rather than via screen sharing.
+
+:::
+
+## Part 2
+
+
+
+## [Public Discussion](https://discord.com/channels/897514728459468821/1224408179363024918)
+
+This session covered two complementary areas of the Stellar developer ecosystem: standardizing wallet interactions and improving key management and signing infrastructure. The first part focused on a proposed Wallet Standard, aimed at improving interoperability between wallets, dapps, and tooling.
+
+The second part featured a live presentation from Cubist, an ecosystem project, highlighting how hardware-backed key management and low-latency signing can be integrated into Stellar-based applications. Together, these discussions explored how better standards and infrastructure can improve developer experience and end-user security.
+
+### Key Topics
+
+- Wallet Standard proposal from Piyal
+  - Overview of the proposed Wallet Standard and its goals
+  - Improving interoperability between wallets and applications
+  - Request for ecosystem feedback on scope, APIs, and expectations
+- CubeSigner
+  - Overview
+    - Introduction to CubeSigner as a secure, low-latency signing service
+    - Key generation and transaction signing inside secure hardware
+    - Positioning CubeSigner as infrastructure for production-grade apps
+  - Stellar integration demo
+    - Walkthrough of a Stellar-based CubeSigner example
+    - Demonstration of the API: generating keys, signing, and submitting transactions
+    - Practical considerations for integrating CubeSigner into existing workflows
+- Ecosystem engagement
+  - How developers can provide feedback on the Wallet Standard
+  - Ways to engage with Cubist Labs for support and collaboration
+
+### Resources
+
+- [Wallet Standard proposal](https://github.com/stellar/stellar-protocol/discussions/1467)
+- [CubeSigner Stellar example](https://github.com/cubist-labs/CubeSigner-TypeScript-SDK/tree/main/examples/stellar)
+- Cubist devs can be contacted via the Stellar Discord or this [contact form](https://cubist.dev/contact-form-cubesigner-hardware-backed-key-management).
diff --git a/meetings/2024-04-11.mdx b/meetings/2024-04-11.mdx
new file mode 100644
index 0000000000..5b83b2ee50
--- /dev/null
+++ b/meetings/2024-04-11.mdx
@@ -0,0 +1,39 @@
+---
+title: "Wallet Interface Standard Discussion"
+description: "This overview summarizes the discussion around a proposed wallet interface standard, including scope considerations, ecosystem feedback, and next steps for refining SEP-43."
+authors:
+  - chad-ostrowski
+  - earrietadev
+  - naman-kumar
+  - piyal-basu
+  - timothy-baker
+tags: [developer, SEP-43]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+This session focused on a draft proposal to standardize the Stellar wallet interface, led by Piyal from the Freighter team. The goal of the discussion was to gather ecosystem feedback on the scope, assumptions, and practical implications of introducing a common interface for wallets.
+
+Participants explored where the proposed standard fits within the broader ecosystem, how it interacts with existing tooling like Stellar Wallet Kit, and whether the current design appropriately accounts for different wallet environments such as browser extensions, mobile wallets, and WalletConnect-based integrations.
+
+### Key Topics
+
+- Overview of the [draft proposal](https://github.com/stellar/stellar-protocol/blob/83191be659166e05f8df1257c6f655de9d1afe63/ecosystem/sep-0043.md) for the wallet interface standard (SEP-43)
+- Feedback on design assumptions:
+  - Questioning whether requiring the network passphrase adds unnecessary complexity
+- Wallet ecosystem coverage:
+  - Recognition that browser extension wallets, mobile wallets, and WalletConnect-based wallets have significantly different requirements
+  - Suggestion that mobile and WalletConnect integrations may need separate SEPs, ideally authored by teams building for those platforms
+- Role of shared tooling:
+  - Discussion on how [Stellar Wallet Kit](https://stellarwalletskit.dev) fits into the ecosystem and how it should interact with or depend on the proposed standard
+- Next steps:
+  - Incorporating ecosystem feedback into revisions of the proposal
+  - Continuing discussion via GitHub to refine scope and requirements
+
+### Resources
+
+- [Draft SEP-43: Wallet Interface Standard](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0043.md)
+- [Wallet interface standard discussion](https://github.com/stellar/stellar-protocol/discussions/1467)
+- [Stellar Wallet Kit repository](https://github.com/Creit-Tech/Stellar-Wallets-Kit)
diff --git a/meetings/2024-04-18.mdx b/meetings/2024-04-18.mdx
new file mode 100644
index 0000000000..31563467f0
--- /dev/null
+++ b/meetings/2024-04-18.mdx
@@ -0,0 +1,33 @@
+---
+title: "Ortege Analytics Platform Demo"
+description: "This overview summarizes a demo of Ortege, a data analytics platform for Stellar and Soroban, focusing on dashboard creation, shareable metrics, and upcoming AI-powered insights."
+authors: [justin-trollip, kalepail, naman-kumar]
+tags: [developer]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+## [Public Discussion](https://discord.com/channels/897514728459468821/911254664576643122/1215404506964172890)
+
+This session featured a demo of Ortege, an analytics platform designed to make Stellar and Soroban data more accessible and actionable for the ecosystem. The discussion highlighted how developers, teams, and community members can easily build and share dashboards to track protocol, application, or business-level metrics.
+
+Participants also previewed upcoming functionality aimed at lowering the barrier to data exploration, including AI-assisted querying and natural language insights, positioning Ortege as a flexible tool for ongoing ecosystem monitoring.
+
+### Key Topics
+
+- Overview of Ortege (deprecated) as a data analytics platform for Stellar and Soroban
+- Dashboard creation:
+  - Build custom dashboards to track arbitrary ecosystem metrics
+  - Queries, widgets, and dashboards are shareable across teams and the community
+- Accessibility and adoption:
+  - Free account availability for ecosystem participants
+  - Designed to surface success metrics for projects and protocols
+- Upcoming features:
+  - Planned AI capabilities to support natural language querying and automated insights
+
+### Resources
+
+- [Ortege analytics platform](https://www.ortege.ai)
+- [Ortege Documentation](https://docs.ortege.ai)
diff --git a/meetings/2024-04-25.mdx b/meetings/2024-04-25.mdx
new file mode 100644
index 0000000000..aeb97d422f
--- /dev/null
+++ b/meetings/2024-04-25.mdx
@@ -0,0 +1,38 @@
+---
+title: "CAP-57 Updates and History Archive–Based Restoration"
+description: "This overview summarizes updates to CAP-57, focusing on history archive–based state restoration, RPC proof generation, and changes to the state archival architecture."
+authors: [garand-tyson, kalepail, naman-kumar]
+tags: [developer, CAP-57]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+This session covered significant updates to the state archival and restoration design described in CAP-57, reflecting feedback gathered from the ecosystem. Garand outlined how the proposal has evolved to simplify the architecture while improving verifiability and operational flexibility.
+
+The discussion centered on removing the need for a separate storage system for expired state and instead leveraging Stellar’s History Archive to store archived entries and restoration data. Participants examined how RPC, captive-core, and validators would interact with archived state, as well as open questions around performance and proof construction.
+
+### Key Topics
+
+1. Garand discussed changes to the State Archival proposal based on feedback received at Meridian 2023. The proposed changes are:
+
+- Previously, a downstream system called the ESS (Expired State Store) would store expired entries. In the new proposal, there is no ESS. All archived entries, as well as all the information required to generate restoration proofs for those entries, are stored directly in the History Archive.
+- RPC nodes can generate proofs for archived state during preflight
+- Captive-core can be directly queried for archived state, meaning that RPC/Horizon instances can potentially service queries for archival state
+
+2. [The draft proposal](https://docs.google.com/document/d/1FAs3Yfo-o-gVqccrP29NSG8ysRvdEoyvuL7ywV4ijXI) lays out the state archival architecture: evicted persistent entries get archived into the Hot/Cold archives, recorded via the Archive State Tree, and exposed through RPC/RestoreOp proofs instead of a separate ESS.
+
+- Validators drop expired persistent entries into the Hot Archive, snapshot it into immutable AST subtrees, and publish Merkle roots (plus binary fuse filters) so RPC can produce restoration proofs without replaying history.
+- RPC/captive-core access to archived state during preflight enables proofs to be attached by clients, while restore flows ensure each archived version can only be restored once and cannot be recreated with different data.
+- The design keeps live state lean, lets History Archives store only snapshots + proofs, and encourages downstream systems (RPC/Horizon) to fetch archived snapshots on demand or shard them across nodes.
+
+3. 
Ongoing GitHub discussion about safely evicting and later restoring persistent contract state in CAP-0057 by archiving it in Stellar’s History Archive and generating cryptographic proofs during preflight, without introducing a new storage system.
+4. Snapshot size is TBD; it's a function of bucket list size as well as the memory and historical demands placed on the RPC.
+5. Bloom filters are the likely solution for proof of non-existence, though they come with trade-offs: they enable fast and cheap lookups but are probabilistic, not deterministic.
+6. Further comments are welcome.
+
+### Resources
+
+- [CAP-57: Archived Contract State](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0057.md)
+- [Discussion: Persistent Entry Eviction](https://github.com/stellar/stellar-protocol/discussions/1480)
diff --git a/meetings/2024-05-02.mdx b/meetings/2024-05-02.mdx
new file mode 100644
index 0000000000..56d75bd9f4
--- /dev/null
+++ b/meetings/2024-05-02.mdx
@@ -0,0 +1,35 @@
+---
+title: "Stellar Plus JavaScript Library"
+description: "This overview summarizes a presentation of Stellar Plus, a composable JavaScript library designed to simplify Stellar and Soroban development through unified abstractions and utilities."
+authors: [fifo-fazzatti, kalepail, naman-kumar]
+tags: [developer]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+## [Public Discussion](https://discord.com/channels/897514728459468821/1234887262530048010/1234887262530048010)
+
+This session featured a walkthrough of Stellar Plus, a JavaScript library presented by Fifo that aims to streamline development across the Stellar ecosystem. The discussion focused on how the library brings together common Stellar and Soroban workflows into a single, composable interface for developers.
+
+Ecosystem feedback highlighted the breadth of functionality covered by Stellar Plus, including account and asset management, WASM-related operations, and Soroban RPC utilities. Participants discussed how this approach can reduce boilerplate and improve developer ergonomics for building applications on Stellar.
+
+### Key Topics
+
+- Overview of Stellar Plus as a JavaScript library for Stellar and Soroban development
+- Design goals and architecture:
+  - Composable abstractions covering core Stellar functionality
+  - Unified handling of assets, accounts, contracts, and RPC utilities
+- Developer experience:
+  - Simplifying common workflows across classic Stellar and Soroban
+  - Reducing integration complexity for JavaScript-based projects
+- Supporting materials:
+  - Architecture overview shared via a Miro board
+  - Code examples and documentation to support adoption
+
+### Resources
+
+- [Stellar Plus documentation](https://docs.cheesecakelabs.com/stellar-plus)
+- [Stellar Plus architecture (Miro board)](https://miro.com/app/board/uXjVKMDkMPI=/?share_link_id=643609701897)
+- [Stellar Plus examples repository](https://github.com/fazzatti/stellar-plus-examples)
diff --git a/meetings/2024-05-09.mdx b/meetings/2024-05-09.mdx
new file mode 100644
index 0000000000..92ecfde2ea
--- /dev/null
+++ b/meetings/2024-05-09.mdx
@@ -0,0 +1,25 @@
+---
+title: "Passkeys, Smart Wallets, and Voting on Soroban"
+description: "This overview summarizes a discussion on using passkeys with Soroban smart contracts, including a voting demo, smart wallet considerations, and implications for fees and signing models."
+authors: [kalepail, naman-kumar]
+tags: [developer]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+This session explored how passkeys can be used as a secure and user-friendly signing mechanism for Soroban smart contracts. Tyler presented a working voting application that leverages passkeys to sign transactions, demonstrating how modern web authentication can integrate cleanly with on-chain logic.
+
+The discussion expanded into broader implications for smart wallets on Soroban, including fee sponsorship challenges, signer abstractions, and interoperability with existing password managers. Participants examined how passkeys could bridge web2 authentication patterns with web3 smart contract interactions.
+
+### Key Topics
+
+1. Tyler built a voting application using passkeys to sign the transaction, which is an implementation of the `secp256r1` verification function.
+2. He showed a cross-platform implementation (web and mobile) and demonstrated that passkeys are the perfect interface between web3 contracts and the web2 authentication mechanisms that most end users are accustomed to.
+3. Ecosystem members discussed the use of smart wallets that would use passkeys as a signer. Challenges were identified around the fees required for smart wallets, the need for a common smart wallet implementation, and how it might interface with existing password managers.
+
+### Resources
+
+- [Passkey-based voting demo](https://passkey.sorobanbyexample.org/)
+- [Soroban passkey demo code repository](https://github.com/kalepail/soroban-passkey)
diff --git a/meetings/2024-06-13.mdx b/meetings/2024-06-13.mdx
new file mode 100644
index 0000000000..1b8a072e4d
--- /dev/null
+++ b/meetings/2024-06-13.mdx
@@ -0,0 +1,33 @@
+---
+title: "Passkeys in Smart Wallets, Super Peach, and Launchtube"
+description: "This overview summarizes demos and discussions around passkeys in smart wallets, including Super Peach, passkey-kit, and Launchtube, with a focus on fees, SDK tooling, and standardization efforts."
+authors: [kalepail, naman-kumar]
+tags: [developer]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+## [Public Discussion](https://discordapp.com/channels/897514728459468821/1249708164366995527)
+
+This session focused on advancing passkeys as a core building block for smart wallets on Stellar, showcasing real applications, developer tooling, and infrastructure services that reduce friction around signing, fees, and account management. Tyler demonstrated how passkeys can be used end-to-end in authorization flows and transaction signing.
+
+The discussion also looked ahead to ecosystem-wide interoperability, introducing a draft smart wallet standard and services that abstract away common hurdles like fee payment and sequence numbers. Together, these pieces aim to make passkey-powered wallets practical, auditable, and ready for broader mainnet adoption.
+
+### Key Topics
+
+1. Tyler created Super Peach, a web3 application that uses passkeys to sign transactions. He demonstrated how passkeys can be used in authorization flows and for transaction signing.
+2. Introduced `passkey-kit`, a TypeScript SDK for creating and managing Smart Wallets via passkeys (includes the actual [Smart Wallet interface](https://github.com/kalepail/passkey-kit/tree/main/contracts)).
+3. Introduced [Launchtube](https://github.com/stellar/launchtube), a service for submitting transactions onchain that covers both the transaction fee and the sequence number. Wild!
+   - Ask in the [`#passkeys` channel](https://discord.com/channels/897514728459468821/1250851135561142423) on Discord for a (testnet) token (funded by the SDF).
+4. He shared his vision for advancing the passkey implementation into an ecosystem standard. The draft Smart Wallet standard documents the proposed V1 interface (admin, session, and recovery signers), hosted reverse lookup helpers, and future hooks (policy-enforcing contracts, richer multisig flows) so that passkey-powered wallets can be audited and deployed on mainnet with a predictable ABI.
+
+### Resources
+
+- [Super Peach repository](https://github.com/kalepail/superpeach)
+- [Super Peach live demo](https://superpeach.xyz)
+- [`passkey-kit` repository](https://github.com/kalepail/passkey-kit)
+- [`passkey-kit` demo](https://passkey-kit-demo.pages.dev)
+- [Launchtube repository](https://github.com/stellar/launchtube)
+- [Draft smart wallet standard proposal](https://docs.google.com/document/d/1c_Wom6eK1UpC3E7VuQZfOBCLc2d5lvqAhMN7VPieMBQ)
diff --git a/meetings/2024-06-20.mdx b/meetings/2024-06-20.mdx
new file mode 100644
index 0000000000..6611c5e762
--- /dev/null
+++ b/meetings/2024-06-20.mdx
@@ -0,0 +1,42 @@
+---
+title: "Kirckz on Meru and Blend Integration"
+description: "This overview summarizes a spotlight discussion on Meru’s integration with Blend, covering non-custodial financial services for freelancers, fee sponsorship, and Soroban-based DeFi primitives."
+authors: [amilcar-erazo, naman-kumar]
+tags: [spotlight, SEP-30]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+## [Public Discussion](https://discordapp.com/channels/897514728459468821/1252402904221089883)
+
+This spotlight session featured Kirckz discussing Meru, a financial services application built for freelancers and remote workers in Latin America. The presentation walked through how Meru combines USD accounts, its own anchoring infrastructure, and Stellar-based primitives to deliver a non-custodial user experience.
+
+Meru’s clients get invoices, virtual cards, and local withdrawals while Meru sponsors fees and integrates with Blend for yield. The discussion highlights architectural choices like SDK usage and the operational challenges of sponsoring fees, supporting recovery, and surfacing DeFi yield in a production setting.
+
+### Key Topics
+
+- Overview of Meru’s product offering:
+  - Meru gives freelancers a USD bank account that connects to platforms such as Upwork and Deel, lets them hold dollars, issue invoices, and withdraw locally (including via MoneyGram partnerships); the wallet, protocol, and stack details are broken out in the bullets that follow
+- Blend Integration
+  - Use of Blend as a liquidity and yield primitive on Stellar
+  - Plans to expose DeFi yield through audited Blend tooling
+- Technical Architecture
+  - Backend services built with Node, Python, and Rust
+  - Frontends using Flutter and React
+  - Event-driven architecture leveraging SNS and SQS
+- Wallet and Protocol Challenges
+  - Fee bumping and sponsorship of XLM fees for user transactions
+  - Implementing passkey-based recovery flows using `SEP-0030`
+  - Reliance on Soroban RPC definitions and SDKs for reliability
+- Tooling and SDKs
+  - Use of `stellar_flutter_sdk` and `blend-sdk-js` to streamline integration
+
+### Resources
+
+- [Meru integration slides](https://docs.google.com/presentation/d/1Fu4AkB0mrvOkK6UDFJHgKwCV-Ul4JRF-xPqTYJ3CQqw)
+- [SEP-30: Account Recovery](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0030.md)
+- [Meru updates on X](https://x.com/getmeru)
diff --git a/meetings/2024-06-27.mdx b/meetings/2024-06-27.mdx
new file mode 100644
index 0000000000..72b15aca8d
--- /dev/null
+++ b/meetings/2024-06-27.mdx
@@ -0,0 +1,38 @@
+---
+title: "Stellar CLI Enhancement Rundown"
+description: "This overview summarizes updates to the Stellar CLI, including new commands, improved workflows for Soroban development, and planned integrations with Stellar Lab V2."
+authors:
+  - chad-ostrowski
+  - julian-martinez
+  - naman-kumar
+  - willem-wyndham
+tags: [developer]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+This session featured Chad and Willem from Aha Labs walking through recent enhancements to the revamped `stellar-cli`. The discussion focused on how the CLI has evolved to better support Soroban development while unifying workflows across the broader Stellar ecosystem.
+
+The presenters highlighted usability improvements, new commands, and local development features designed to streamline common tasks such as network setup, account management, and contract initialization. They also shared a preview of planned expansions to cover more Stellar operations and tighter tooling integration.
+
+### Key Topics
+
+- Overview of the new `stellar-cli`:
+  - Transition from the `soroban` command namespace to `stellar`
+  - Cleaner, more consistent interface across Stellar and Soroban workflows
+- Local Development Workflows
+  - Local network setup and management with `stellar network container [start|logs]`
+  - Account creation and funding via `stellar keys [generate|fund|ls]`
+- Contract Development Support
+  - `stellar contract init` for scaffolding new Soroban projects
+  - Built-in helpers for transaction signing and testing
+- Roadmap and Future Improvements
+  - Expanded support for additional Stellar operations
+  - Planned integration with Stellar Lab V2 for a smoother developer experience
+
+### Resources
+
+- [Stellar CLI repository](https://github.com/stellar/stellar-cli)
+- [Aha Labs GitHub](https://github.com/AhaLabs)
diff --git a/meetings/2024-07-11.mdx b/meetings/2024-07-11.mdx
new file mode 100644
index 0000000000..983b99639a
--- /dev/null
+++ b/meetings/2024-07-11.mdx
@@ -0,0 +1,41 @@
+---
+title: "Crash Course in Stellar Data Analysis"
+description: "This overview summarizes a crash course on analyzing Stellar network data, covering Hubble access, efficient querying practices, and practical guidance for data exploration."
+authors: + - naman-kumar + - nicole-adair + - simon-chow + - sydney-wiseman +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discordapp.com/channels/897514728459468821/1259997651995332653) + +The SDF Data team delivered a practical introduction to analyzing Stellar network data, aimed at developers and analysts looking to explore on-chain activity more effectively. The session focused on how to get started with Hubble, Stellar’s BigQuery-based analytics platform, and how to approach large datasets efficiently. + +In addition to tooling walkthroughs, the discussion emphasized analysis best practices—from understanding protocol-level data structures to writing cost-effective queries—and highlighted community resources available for ongoing support. + +### Key Topics + +- Overview of Stellar data analysis using Hubble (BigQuery) +- Getting Started + - Logging into Hubble and joining the `crypto-stellar` dataset + - Using the UI’s schema and preview tools before running full SQL queries +- Querying Best Practices + - Avoiding `SELECT *` on large tables + - Filtering by partitioned or clustered columns such as `batch_run_date` and `closed_at` + - Minimizing unnecessary joins to keep queries fast and inexpensive +- Analytical Mindset + - Framing each analysis with a clear objective +- Building Support + - Sharing queries and asking questions in the dedicated Discord channel for data-related topics + +### Resources + +- [Presentation slides](https://docs.google.com/presentation/d/1QsCwFLFcDF4RmNIwtSSnNrUfZb0RM0kLxOOxC7ENY5M) +- [Hubble analyst guide](/docs/data/analytics/hubble/analyst-guide) +- [`#hubble` help channel](https://discord.com/channels/897514728459468821/1214961876253933649) diff --git a/meetings/2024-07-18.mdx b/meetings/2024-07-18.mdx new file mode 100644 index 0000000000..ae01a2bd7c --- /dev/null +++ b/meetings/2024-07-18.mdx @@ -0,0 +1,57 @@ +--- +title: "Smart Wallet Proposal with WebAuthn" +description: "This overview summarizes a discussion on a proposed WebAuthn-based smart wallet for Stellar, focusing on custom authorization, passkeys, and ecosystem-driven use cases." +authors: + - alejo-mendoza + - dmytro-kozhevin + - ishan-singh + - kalepail + - naman-kumar + - robin-olthuis + - timothy-baker +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discordapp.com/channels/897514728459468821/1262457130338881566) + +This session centered on a proposal to introduce a smart wallet as a public good on Stellar, leveraging WebAuthn (passkeys) and Soroban’s custom authorization via `__check_auth`. The discussion highlighted how Stellar’s protocol already supports customizable auth logic, making smart wallet implementations relatively straightforward at the protocol level. + +Developers shared practical use cases enabled by custom authorization, including automation, improved UX for onboarding, and application-managed accounts. The conversation emphasized passkeys (and protocol support for `secp256r1`) as a way to move beyond seed phrases while enabling richer authorization policies for different app needs. + +:::note + +The first part of the call was lost. The video posted above captures the second half of the call where ecosystem developers shared their use cases and requirements for a smart wallet on Stellar. + +::: + +### Key Topics + +- Tyler put forward a proposal for a smart wallet as a public good. 
Given that the native auth can be overloaded by using `__check_auth`, a Stellar implementation of a smart wallet is fairly straightforward. The capability to customize auth is already built into the core protocol.
+- The proposal only uses WebAuthn-based signers (passkeys). It does not use ed25519, which it arguably should, given that virtually all existing Stellar accounts use that scheme. It also introduces the notion of temporary and admin signers to illustrate that an account can be managed by multiple signers, each with a different access policy.
+- The biggest unlock with custom auth is the ability to execute custom logic. We heard from various ecosystem members about how they might use it.
+  - A dev building a perpetual protocol suggested smart wallets could automatically manage DeFi positions, a major improvement over the status quo, where users must constantly monitor assets to decide when to execute a trade.
+  - Folks are excited about replacing seed phrases with passkeys, especially for onboarding net-new users to the blockchain.
+  - Authorizing cross-chain messages from a different chain, particularly programmatic authorization, requires custom account implementations.
+  - Some apps note that users prefer not to think about wallets at all and instead just experience the app (especially games). In these cases, the app may assign a temporary account and control access via `__check_auth`.
+  - Microtransactions could be authorized without requiring explicit user signatures for every action.
+- Design choices
+  - Two-contract architecture: a factory for atomic deploy + init, and a wallet contract for signer logic, upgradeability, and WebAuthn verification
+  - Signer storage model: admin signers persisted, session signers temporary, with admin count tracking to prevent removing the last admin
+  - TTL management: aggressive instance and key TTL extension to reduce archiving and restore UX friction
+  - Core wallet functions: admin-guarded `add`, `remove`, and `upgrade`, plus a policy-enforcing `__check_auth`
+  - Event indexing and recovery: signer add/remove events with public key echoing to enable indexers and session signer rehydration
+  - Ecosystem and security considerations: cross-domain add-signer flows, mobile UX pitfalls, passkey origin binding risks, and mitigation via multiple or alternative signer types
+
+### Resources
+
+- [Smart wallet proposal (GitHub discussion)](https://github.com/stellar/stellar-protocol/discussions/1499)
+- [WebAuthn smart wallet implementation](https://github.com/kalepail/passkey-kit/blob/main/contracts/webauthn-wallet/src/lib.rs)
diff --git a/meetings/2024-07-24.mdx b/meetings/2024-07-24.mdx
new file mode 100644
index 0000000000..77ea19e6fd
--- /dev/null
+++ b/meetings/2024-07-24.mdx
@@ -0,0 +1,173 @@
+---
+title: "Build Better: Getting Started in Rust"
+description: "A hands-on walkthrough of Soroban smart contracts in Rust: using Okashi and the Stellar CLI to build, deploy, and invoke a simple storage contract, then exploring a more advanced staking-style contract with enums-as-storage keys, token mint/burn flows, and cross-contract calls."
+authors: julian-martinez +tags: [tutorial] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Julian introduces the fundamentals of writing Soroban smart contracts in Rust, focusing on how contract functions, data types, and contract storage work in practice. The session starts with a simple “set/get” example to show how values are stored and retrieved on-ledger, plus common patterns like default fallbacks when a key doesn’t exist. + +From there, the workshop shifts into developer tooling: using Okashi for rapid iteration and then using the Stellar CLI locally to initialize a project, generate keys, build WASM, deploy to testnet, and invoke contract functions. The latter portion expands into a staking-style contract to demonstrate more advanced storage patterns, authorization, token interactions, and cross-contract calls. + +### Key Topics + +- Okashi ([deprecated](https://x.com/Okashidev/status/1895579419663876296)) workflow overview: code editor, contract function panel, and console output for quick iteration +- Soroban/Rust setup basics: + - `#![no_std]` for smaller WASM and reduced ledger/storage bloat + - `soroban_sdk` and common types (`Env`, `Symbol`, `String`, `symbol_short`) + - Contract structure patterns using `#[contract]` + `#[contractimpl]` +- Simple storage contract (setter/getter pattern): + - Setting a value via `env.storage().instance().set(...)` + - Reading a value via `env.storage().instance().get(...)` + - Returning a default `String` when a key is missing (error handling instead of assuming a value exists) +- Stellar CLI local workflow: + - Install toolchain + CLI, scaffold a contract project, and build to WASM + - Generate and manage keys for signing (`stellar keys generate`, `stellar keys ls/show`) + - Deploy to testnet and invoke functions via `stellar contract deploy/invoke` + - Address conventions recap (e.g., public account vs contract id vs secret key prefixes) +- Staking-style contract tour (advanced patterns): + - Using an enum as a structured storage key map (e.g., `DataKey::{Admin, Token, ShareToken, IsActive, ...}`) + - Admin gating via authorization (`require_auth`) and `assert` checks against stored admin + - Campaign state toggles (`start_campaign`, `stop_campaign`) and status checks + - Deposit/withdraw flows tied to campaign activity + - Cross-contract token calls using a client interface: + - `transfer` of deposit token into the contract + - Minting/burning a “share token” to represent participation +- Testing overview: + - Simulated `Env`, mocked addresses, minting setup balances + - End-to-end test validating deposit updates, share token mint, and withdrawal cleanup + +### Resources + +- [Okashi GitHub](https://github.com/okashi-dev) + +
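+For reference, here is the finished setter/getter contract from the session, reconstructed as a compilable sketch (the shape follows the walkthrough in the transcript below; minor details such as `String::from_str` may differ across soroban-sdk versions):
+
+```rust
+#![no_std]
+use soroban_sdk::{contract, contractimpl, symbol_short, Env, String, Symbol};
+
+// Key under which the title value lives in instance storage.
+const TITLE: Symbol = symbol_short!("title");
+
+#[contract]
+pub struct TitleContract;
+
+#[contractimpl]
+impl TitleContract {
+    // Set a value for the "title" key in instance storage.
+    pub fn set_title(env: Env, title: String) {
+        env.storage().instance().set(&TITLE, &title);
+    }
+
+    // Get the value back, returning a default message when none is set.
+    pub fn get_title(env: Env) -> String {
+        env.storage()
+            .instance()
+            .get(&TITLE)
+            .unwrap_or(String::from_str(&env, "Default Message"))
+    }
+}
+```
+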
+ Video Transcript
+
+[00:00] Can you hear me? My name is Julian Martinez, Senior Developer Advocate, and I'm super excited to get today's session kicked off, where we will be learning the ins and outs of Rust smart contract development. So I'll go ahead and kick it off with a quick introduction. Again, my name is Julian Martinez, Senior Developer Advocate here at the Stellar Development Foundation. Once again, over the next half an hour to an hour or so, depending on how you guys are feeling, we're going to dive into some basic smart contract development, and we may even scale it up to maybe getting into a staking smart contract; it just kind of depends on how the flow goes, all right. So first things first, we're going to go ahead and dive into a great online IDE tool here. It's called Okashi. Again, Okashi is an online IDE. It's really good for iterating on your smart contract ideas. It's got a cool code window where you can type in all your syntax. There's the contract window where you can actually call the functions related
+
+[01:00] to the smart contract that you've just coded. There's also a console window here that shows the output of the smart contract calls, or the function calls, rather. So, if you'd like to follow along, you can actually scan this QR code and, as I'm updating the smart contract online, you should be able to see the smart contract being updated on your local machine as well. So what I'm going to do here is bring Okashi to my main screen. You should be able to see it here, all right? So this is Okashi again. We have the code window ready for all the syntax, and you know this is where the bulk of your logic will live for your smart contracts. We have the contract window, which is going to hold all the functions and will allow you to, you know, execute and invoke different functions that live on that contract. And you have your console window here, which is just going to
+
+[02:00] output, I guess, or it's going to show, the return data of the function call itself. All right, so let's go ahead and dive into Rust and Soroban. If you don't already know, Soroban is Stellar's smart contract platform. So a few things here, first things first, you see the flag `no_std`. That is to tell the compiler to not use the Rust standard library. Soroban smart contracts use this flag and optimize for smaller contracts so they don't take up as much space on the ledger. This makes for, you know, good long-term thinking for things like state bloat, and overall just being mindful of not taking up more space and storage than you need to, because in the future it can lead to bottlenecks in user experience and overall just becomes unmanageable. So you see, here on line three, you use `soroban_sdk`. This is because Soroban uses a
+
+[03:00] special dialect of Rust with dedicated data types and some special attributes. So we're importing a couple of attributes here, namely `contract` and `contractimpl`, plus the `Env` data type, `Symbol`, `String`, and `symbol_short`. Now let's go ahead and cover a few of these. The two attributes mainly pertain to telling the compiler to treat all the following syntax as a contract would be treated. So when it comes to the contract implementation, it exposes all the publicly facing functions to the environment for processing through different mechanisms like testing, static function testing, etc. So with `Env`, this data type is going to be responsible for accessing smart contract storage within the contract itself, as well as data from the ledger.
So you'll see how we use
+
+[04:00] this a little bit later when we're creating a key-value kind of storage mechanism for the smart contract. Now, `Symbol`: I don't want to say it's another term for string, but you can think of it as a string. So there are two types of symbols: the short symbol, which is a string of up to nine characters, and then the long symbol, which is a string of up to 32 characters. But the difference here is that long symbols actually require you to pass in the `Env` data type before constructing them. The next data type we have, `String`, is exactly what it sounds like: it's going to be a concatenation of letters. `symbol_short` is a function that we use to kind of just quickly create short symbols. In this case we're going to create a short symbol for the word title. Now you see here on line five, `const TITLE: Symbol = symbol_short!("title")`. We're just going to use this as
+
+[05:00] kind of a label for the key-value system that we're about to set up. So again you see `const TITLE: Symbol = symbol_short!`, and then we're using the word title. All right, here on line seven you see the contract attribute, followed by line eight, `pub struct TitleContract`. So we're creating an empty struct, and all Soroban smart contracts will follow a similar pattern: you see a contract attribute, the empty struct with the name of the contract itself, and then a contract implementation attribute followed by the implementation of the actual contract. So let's go ahead and start writing some functions here. This is our title contract, and the main purpose of this is to kind of illustrate the key-value system when it comes to storage. So I'm going to create a value for a key, and then I'm going to go ahead and, well, I'm going to create the key-value getter and setter. So
+
+[06:00] we're going to be able to set a value to a key, then we're going to be able to retrieve that value from a key. So let's go ahead and say pub function, we're going to say `set_title`. This is going to take in two arguments: we're going to say `env` of the type `Env`, and we're going to take in a `String`, and this is going to be for the title value. We're not going to return anything, so we'll go ahead and get right into the logic. Again, referring back to the `Env` data type up here, you see `env.storage()`. We have to go `instance()`, then `set`, and then we're going to say set, the TITLE key, and then we're going to bring the argument title in from the function's arguments
+
+[07:00] here. So let's go ahead and compile this, let's see if this works. Again, you see `env.storage()`: we're accessing the smart contract storage. Here we have instance, which is a certain storage type for Soroban smart contracts, of which there are three: temporary, persistent, and instance. And then you see the set method here, where we're actually setting a value to the title key, and then the value will be title. But it looks like we have some errors here: unused variable, signature literal, cannot find type title in this scope. Okay, public function, string, title, env storage: okay, we got some issues here. So let's see what we got.
+
+[08:00] Env storage, and TITLE. So this looks like it was compiled. Function set title: all right, I'm just going to go ahead and do a quick sanity check. I do have the correct function on the side here. I'm also wondering if this is going to work.
All right, let's go ahead and see what the issue was in the last set title. Okay, not sure what the issue was here, but let's go ahead and see if we can actually debug it side by side. Let's go ahead and find the difference. See this, see if this compiles. It probably won't.
+
+[09:00] Okay. Well, I'm not sure what the issue was here, but long story short, here is the correct function. As you can see, we see pub function `set_title`. We're taking in two arguments: one is the data type `Env`, the next is the data type `String` for our title. We're accessing the smart contract storage, again by typing `env.storage().instance()`. Again, there are three types of storage in Soroban smart contracts, and they each kind of serve a purpose when it comes to how long you want your data to live by default: again, temporary, instance, and persistent. For this use case, we're just going to use instance. We have the set method here, where we are setting a value to the key title, and the value will be the title argument that we're passing in. So let's
+
+[10:00] go ahead and set a title. We'll say "Soroban rocks," cool. It looks like our function call went through and we didn't get any return data. However, to access the data that we just set to the contract storage, we need to create a getter function. So to do this we actually just have to follow the same structure here: pub function, we have to say `get_title`. We're only passing in one argument, which is of type `Env`, and we want to return a `String`. So for this we're going to go `env.storage().instance()`, and we're going to say get TITLE, and we are going to say `.unwrap_or`,
+
+[11:00] or rather I'm going to say `String::from_str`, and we're going to pass in the `env` and we're going to say "default message," all right, so default message here. So what is this doing? Let's see if this compiles first, don't want to hit a block. Okay, instance, instant: so it looks like we got a typo here, all right. So what are we doing in this function? You see pub function `get_title`. So we have one argument that we're taking in, which is of type `Env`, and then we're returning the data type `String`. So here you see `env.storage()`. Again,
+
+[12:00] this is how we access the contract storage. You see instance, which is the storage type, `.get`, which is the method to retrieve data, and you see the key title. So we're retrieving the value that lives at the key title. And to access this value, you see `unwrap_or`. And now, usually this would be fine, right? But if no value lives at that key, then you need to have some error handling, right. So if there is no value that lives at this key, we're just going to return a default message in the form of this string that we defined. So let's go ahead and get the title, all right. So this is a new deployment inside of a contained web app. So technically, I have not set any data to this new deployment after creating this function. So if I click get title, the default message will be returned, right. So get title: default message. I'll go
+
+[13:00] ahead and set a new title. We'll say "Stellar rules," all right. So we set a new title: instead of "Soroban rocks," we'll say "Stellar rules." We go ahead and get that, all right. Cool. So now you see that we are able to set some data with the set title, and then we can get the data by retrieving it using the get title method. Now, once again, this was Okashi, which is an online IDE for Soroban smart contracts.
Now what I'd like to do is actually move the tutorial over to the CLI part of things, and we'll actually start coding and deploying on testnet, which you can actually do on Okashi as well. But I do want to get you guys familiar with the tools that the SDF offers, mainly because, when it comes to the Stellar CLI, this will be
+
+[14:00] one of your best friends during your Stellar smart contract development journey. So let's go ahead and switch the windows here. All right, close this out for now. All right, so getting started with the Stellar CLI and Rust on your local machine takes three easy steps, right? So what you're going to want to do to make this easier for yourself is to actually scan this QR code here at the bottom left. This will take you right to the getting started section of the docs, where you'll be able to find these exact commands; you can paste them right into your CLI. But for the sake of the tutorial, I might as well kind of go through the steps with you guys. So we see `curl --proto '=https' --tlsv1.2
+
+[15:00] -sSf https://sh.rustup.rs | sh`. Okay, so what this is going to do is install the Rust toolchain on your computer. I've actually done this already, so you can see I have Rust version 1.79. It takes a little while, so I will kind of spare you guys from moving forward so fast, because I know it takes about two and a half minutes. So in the meantime I can kind of go through the installation process for the rest of the tools. We say `rustup target add
+
+[16:00] wasm32-unknown-unknown`. Oh my gosh, the demo effect, all right. So what this is going to do is tell the Rust compiler to compile Rust smart contracts into Wasm, and it's going to output those files into a target directory. We'll see this process a little bit later, but yeah, that's what the second command does: it tells the Rust compiler to compile Rust files into Wasm. And the last command: we have `cargo install --locked stellar-cli`. So this is going to install the Stellar CLI, the latest version. So we're on version 21.0, and I know again, this would probably take around two and a half minutes. So if you're following along and it takes a little while, not to
+
+[17:00] worry, I'll move slow throughout the rest of the tutorial. So just to kind of make sure we have the correct version of Stellar installed, we'll say `stellar --version`. Again, you see Stellar 21.0, cool. So what we'll do next is we'll actually go here and we're going to say Stellar contract. Actually, we'll say `stellar`, and then we'll take a look at all the commands that you see here: completion, contract, events, keys, xdr, network, etc. So let's go ahead and dive into contract. We'll go ahead and clear this out. We'll say `stellar contract`, and then we'll say `init`, and we want hello world. So hello and world, all right. So you see the messages here: writing hello world, etc., all these good, all
+
+[18:00] these files being written, and you see it has injected a Cargo, or Rust, project right into the directory that we were working from. And let's go ahead and click into our Cargo.toml file. You see a backwards-compatible SDK: soroban-sdk 20.3.2. This is a local issue, so I'm just going to go ahead and restart my rust-analyzer to see if that fixes this little error. But we're going to go ahead and dive right into the hello-world directory. So we'll `cd hello-world`, cool.
And let's take a look at the contract here, and you see, not an older contract, but a boilerplate contract that is different from the one that we're using. However, I'm just going to go ahead and bring the contract that we're using right into here, and we're actually going to work right from
+
+[19:00] this. So you see this here, where we have the hello-world project that we're in, and one cool thing about the Stellar CLI is that you can actually generate keypairs right from the CLI itself. So what we're going to do is exactly that. I'll go ahead and go to `stellar keys`, and we'll go ahead and list the keys we have already. So `stellar keys ls` will list the different keypairs that you have available to actually sign transactions. And what that means is, every time you deploy a contract or invoke a function, etc., that takes fees, right, so there has to be a source account assigned to that transaction to pay that fee. Now we have two that exist already, but let's go ahead and demonstrate how to create a new one, and we'll say `stellar keys
+
+[20:00] generate`, and we'll define the network, testnet, and we'll give this a name. So for this name we want it to be shy. So let's go ahead and generate a new keypair called shy for the testnet network. Cool, you see that the method has been executed. Let's do `ls`, right. So you see shy, alice, and joe, and we'll say `stellar keys address shy`, and then `stellar keys show shy`. All right, you see the keypair here: this is the public key and private key. By the way, I'm just doing this for demonstration purposes only; please never share your private key with anybody. But here is what happens when you generate a new keypair
+
+[21:00] using the `stellar keys generate` method. As you can see, the address begins with G, and the source account, or the source key, begins with S. So, moving forward, let's go ahead and deploy this smart contract to the network, and we're going to go ahead and say `stellar contract deploy`. And, actually, we have to start with `stellar contract build`. And again, what this is going to do is actually compile the Rust smart contract into its Wasm format here, and it'll be in the target directory, wasm32-unknown-unknown. It'll take a little bit, maybe around one to two minutes, maybe less, all right. So after about a minute, we'll go ahead and look at the
+
+[22:00] release directory. You see here wasm32-unknown-unknown: in the release directory you have the hello_world Wasm file. That's because we're actually still defining this project as hello world, so when it gets compiled, you'll see that it gets compiled into the name of the project itself. So to resolve that we would actually have to go and change the name of this project as a whole, but we won't do all that for the sake of this demo. So we're still working from the hello-world directory, and we are going to deploy the Stellar smart contract onto testnet. So we'll say `contract deploy`, right, and we have to define the Wasm: it's going to be `target/wasm32-unknown-unknown/release/hello_world.wasm`. And we'll say the network is going to
+
+[23:00] be testnet, and we're going to say the source is going to be shy, all right. So this is the command to deploy contracts onto the network. You see, `stellar contract deploy`.
You're defining the WASM location, right, the location of the WASM file, so the compiled file of the smart contract. You see the network is testnet, and then the source is shy. So let's go ahead and send this transaction, fingers crossed. We get a contract ID back; all right, that was very fast. And you see that this begins with the letter C. So let's go ahead and go back to our key pair. Just a quick lesson on how addresses are defined on Stellar: G is for public addresses, C is for contract addresses, and S is for source accounts, or private keys, rather. All right. So we've deployed the contract, and here + +[24:00] is our ID. Let's go ahead and call some functions on that contract. So let's go ahead and do `stellar contract invoke`. We're going to define the ID here, and we're going to say the network is testnet and the source is going to be shy. And then we are going to call our function by doing the following: we say `--`, then we name the function, and then we define the argument. So set_title takes in one argument for title, and we're going to use quotes and say "Stellar Smart Contracts". Ah, that's too long. We'll just say "Stellar dApp". + +[25:00] Cool. All right, you can see here that there is an unrecognized subcommand. So what does that mean? It basically means that I spelled a command wrong, right? So one cool thing about the error messaging in the CLI is that it'll actually kick back the functions that it thought you meant. So we see here: set title. You can use an underscore or a hyphen; I'm going to go with the underscore here, and we're going to demonstrate calling that function. So we'll say set_title, and now we have the correct function name, so it should work. It looks like it did, and let's go ahead and call get_title, all right? So, just like that, we've used the Stellar CLI to create a + +[26:00] new key pair, deploy a smart contract to the test network, and invoke the functions on that smart contract. And, just to do a quick recap, some of the smart contract basics that we learned today were data types as well as a little bit of the ins and outs of contract storage. So really quickly, I want to recap those again. We have #[contract] and #[contractimpl], which are both attributes specific to Soroban, and Env, which is responsible for getting data from the smart contract as well as the blockchain on Stellar; we call it the ledger. You have Symbol, which is a small string type of up to 32 characters, or up to 9 characters when created with symbol_short!. String, which is the string type that you're familiar with. And symbol_short!, which is a macro that helps you create short symbols. So that's a little bit about the data types. And now, when it comes to contract storage, we went into some 101 + +[27:00] on the key-value structure of Stellar smart contracts. Here you see, on line five, `const TITLE: Symbol = symbol_short!("TITLE")`. Again, we're just using this line as a placeholder to have an easy reference for a key that we're naming throughout the smart contract. Then the #[contract] attribute with the blank struct, and this is basically how all Soroban smart contracts look: the #[contractimpl] attribute, followed by the implementation of the struct, or the contract itself, and the functions. Here you see `pub fn set_title`. Again, we're taking in two arguments.
The first one: you will always see `env` as an argument within each function in Soroban smart contracts, and this will be the pattern that you'll see throughout your development journey with Stellar. So + +[28:00] just get familiar with this structure altogether. So we have `env` as the first argument, and then `title` as the next argument, taking in a String value. Again, to access storage within a smart contract, you write the syntax `env.storage()`, and then the type of storage. So in this case, instance; but again, there are three types: temporary, instance and persistent. To set a value to a key, you call the set method, right, and then you're defining the key, in this case TITLE, which is a placeholder for the name of the key, and then the value that we're setting is the argument that we're passing in to the function, also called title. All right. And then to retrieve that data, you go through a similar process, but in reverse. So, again, accessing the smart contract storage with `env.storage().instance()`, right; get is the method that you'll + +[29:00] be using. You're referencing a key in the smart contract storage, so the key here is TITLE, and to get the value of the key you use the unwrap_or method. In this case we're planning for errors, so we're saying unwrap_or: if no value lives at that key, then return this default message. All right, so that's a quick recap on some basic smart contracts. But we do have quite a bit of time left, so I'm going to go ahead and dive into some slightly more advanced smart contract logic, and I'm going to freestyle this a bit. But I do have a staking repo that you guys can reference for your own studies if you want to, and you can find it here in this QR code. So + +[30:00] this will take you to a hackathon repo that I set up that has a couple of directories in it. You'll find a workshop directory, and I think you'll also find a slides directory, but we'll be working from the workshop directory here. You should see staking and data-types, so I'll go ahead and give you a quick overview of how data types look and, I guess, how they're implemented here. One of the main things that you're going to want to know is enumeration, right. When it comes to Stellar smart contract development, we often use enumerations as maps. So what does that mean? When you look at a data key of a smart contract, you're basically setting up a key-value system similar to the key-value system that we defined earlier. + +[31:00] When we go back to this basic smart contract here, hello world, all right, you see the TITLE constant, right; this is the key, and then we're setting the value later with the title argument. So the key is TITLE, and then the value is also named title. So if we go back to the staking smart contract, right, and dive into it, you see here the DataKey enum. We can host many keys, and throughout the smart contract logic we can define the values for these keys.
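To anchor that recap, here is a minimal sketch of the set_title/get_title contract described above. It assumes a soroban-sdk 20.x API; the struct name and the default message are illustrative, not the exact code from the demo:

```rust
#![no_std]
use soroban_sdk::{contract, contractimpl, symbol_short, Env, String, Symbol};

// Placeholder key used to reference the stored title throughout the contract.
const TITLE: Symbol = symbol_short!("TITLE");

#[contract]
pub struct TitleContract;

#[contractimpl]
impl TitleContract {
    // Writes `title` under the TITLE key in instance storage.
    pub fn set_title(env: Env, title: String) {
        env.storage().instance().set(&TITLE, &title);
    }

    // Reads the title back, falling back to a default if nothing is stored yet.
    pub fn get_title(env: Env) -> String {
        env.storage()
            .instance()
            .get(&TITLE)
            .unwrap_or(String::from_str(&env, "Default Title"))
    }
}
```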
So we'll actually just dive right into this smart contract over the next 30 minutes, and then by the end of it, you should be able to understand more data types and more ins and outs of smart contract storage, and then we'll take a look at some more examples of setters and getters as well as cross-contract calls. All right, so go ahead and strap in, stick with me, and thank you so much + +[32:00] if you've been with us thus far; I hope you're learning something. Yeah, so let's go ahead and get it kicked off again. At the top you see #![no_std]. Again, that's to tell the compiler not to use the Rust standard library when compiling Soroban smart contracts, keeping them optimized and small so they don't take up so much space on the ledger. Again, on line three, you see `use soroban_sdk`, with some familiar attributes we saw from the last example, like #[contractimpl]. Now you see #[contracttype]. This is to derive a certain kind of, how do I put this, object from the Soroban SDK and treat it accordingly. We can dive into the ins and outs after, and I'd be happy to get you more information on how + +[33:00] attributes work after the stream. So here you see Vec, which is an array of contiguous data. So what does that really mean? It's just an array, not specific to one type. You can have a vector of Strings, a vector of Symbols, etc. Address is just the data type for Soroban smart contract addresses. BytesN is a fixed-length bytes array, or a fixed bytes value. You have the Env data type, which is again used for accessing data on the blockchain and the ledger. And then IntoVal; we're not going to get into this. But Vec, again, is the actual data type, and vec!, here in lowercase, is the macro that we use to quickly create vectors. All right, so let's go ahead and dive into + +[34:00] the DataKey enumeration. Here in the comment it kind of explains everything, but this is used to represent the state variables stored in the smart contract storage. Again, it allows for super easy, convenient access to data within the smart contract. We covered this a little bit earlier, but each one of these keys has a value attached to it. So you see `pub enum DataKey` and a list of all the keys within the contract. You see Contributions, and in parentheses, Address. So what does this mean? This means Contributions is attached to many keys, and all of those keys must hold the data type Address, right: address A, B, C and D. Each of these addresses is mapped to one value. So basically this data key will record which address made which contribution, right. Contributors is just going to be a vector of all the + +[35:00] addresses that have made contributions. Token is just going to be the address of the token that the contract corresponds with. ShareToken: we're actually going to mint a token from this contract to use as a share token. So we're going to distribute tokens when a user makes a deposit, and here we're actually going to write that into the contract itself. IsActive is going to be a boolean: if this value is true, certain logic ensues; if this value is false, again, a certain path of logic ensues. Admin is going to be an address, so this address will have certain capabilities and be able to hold domain over various functions. Initialized is going to be a boolean that tells the caller whether or not this contract is already initialized.
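For reference, a sketch of what that storage-key enum looks like; the variant list follows the walkthrough, while the derive and the comments are assumptions:

```rust
use soroban_sdk::{contracttype, Address};

#[contracttype]
#[derive(Clone)]
pub enum DataKey {
    Contributions(Address), // amount deposited, keyed per contributor address
    Contributors,           // Vec<Address> of everyone who has contributed
    Token,                  // Address of the accepted deposit token
    ShareToken,             // Address of the share token this contract mints
    IsActive,               // bool: is the campaign currently running?
    Admin,                  // Address allowed to start/stop the campaign
    Initialized,            // bool: has initialize() already been called?
}
```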
All right, so this is our staking contract, right? + +[36:00] And again you see the #[contract] attribute, an empty struct with the name, and the #[contractimpl] attribute. Let's go ahead and dive into our functions. So this is our initialize function. It takes in three arguments, again, plus the execution environment of the contract: admin, pretty obvious, it's the address of the admin; token_wasm_hash, this is an interesting one, because we'll actually be creating a new token right from the WASM hash itself; and then token, which will be the address of the deposit token contract, so the acceptable token for the staking campaign. The token being deposited must correspond with the address that is set here within the smart contract function during this initialize call. And with the token_wasm_hash, we're actually going to be able to use a WASM hash to access an interface whereby we will + +[37:00] be minting a freshly generated token, a Soroban token if you will, to people who make deposits to the campaign. All right. So, diving in, you see some familiar functions here, all right. So the first thing we're going to do is set the admin address in storage, so `env.storage().instance().set`; again, as previously covered, `env.storage()` accesses the storage part of the smart contract, instance is the type of storage we're accessing, and set is the method. And just a quick variation from the demonstration before: you see `DataKey::Admin`. It's the same thing as in the other smart contract, right; let's go ahead and open it. Same thing as you see here, `env.storage().instance().set` and TITLE. Let's go ahead + +[38:00] and go back here: `env.storage().instance().set` and `DataKey::Admin`. Here it's just defined in the DataKey enum, so you're literally accessing that, and then you're passing in the value of the admin argument here in the function, all right. So you see `let share_contract = token::create_contract(...)`. We're actually able to do this by importing; do we import the token data type? All right, so there is a module here, `mod token`, and we're importing that, importing this file into the main smart contract. What this will do is import the pre-compiled Soroban token contract file and allow the main smart contract to use the functions that live on that contract. So + +[39:00] actually, from a compiled WASM file, we're importing that into the main smart contract, rather. So where were we? I'm getting ahead of myself here. So that's how we're able to use the token file, the token smart contract, to call the function create_contract. So `let share_contract`, right: here we're going to create a new contract by calling the create_contract method that lives in the token module, and we're going to use the token_wasm_hash here to create the token contract from this compiled WASM hash, and we're going to attach an interface to the token contract that we are setting in the initialize function. Here again you see `token::Client:: + +[40:00] new`, all right. So we're going to create a new token interface, a client, over the share contract, and then we're going to call the initialize function. So here we're going to set the admin as the current contract address, so the staking contract itself; we're going to define the decimal places; and then we're going to define the symbol. So just a quick recap: what are we doing here?
We're going to say: let the share contract equal a new token contract, right. So we're going to create a new token contract, again from the token WASM hash and the token address, all right. So we've created a new token, and then on that new token we call the initialize function, and this is what + +[41:00] we're initializing that new token with, all right. So, moving forward, let's go ahead and see what else is happening. We have `env.storage().instance().set`, so we're setting the Token key with the token smart contract address; then, again moving forward, we're setting the ShareToken key's value to the share contract address; and then we're setting Initialized to true. So after this is all said and done, most of the data keys have been defined. You have Token being defined as the actual token address that has been passed in by the user; ShareToken is defined when the share token is deployed; IsActive hasn't come into play yet; Admin was passed in to the initialize function; and then Initialized, whose value gets changed at the very end of the initialize sequence, all right. So, moving forward, we have start_campaign, and what this is going to do is basically set the IsActive value to true. + +[42:00] The thing to note here is that we're actually making super cool use of the authorization framework on Stellar. So you see `pub fn start_campaign(env: Env, admin: Address)`, and then `admin.require_auth()`. This means that the person making the call must match the value that is passed in the admin argument. I can't pass in G2 if my address is G1. So this will fail if I'm passing in an address that is not the wallet that I'm calling from. Then `let current_admin = env.storage().instance().get(...)`. So we're defining the current admin, right: we're going to get the current admin from the contract storage here by unwrapping the data key Admin, and then we're going to say: if there is no value that lives there, which there + +[43:00] should be, right, because we've just initialized the smart contract, then we're just going to return the current contract address. And here we're going to create an assertion: if the admin address that was passed in for the argument does not match the current admin address value, then we're going to panic and make this function fail. All right. So if that's all good, then we're going to say `env.storage().instance().set` with DataKey::IsActive, right. So once we set that value to true, the campaign has started, and let's go ahead and take a look at what that means. Right, so we can go the other way: stop_campaign. Again, the same exact logic, except here at the end we're setting that DataKey::IsActive value to false. After this, we can easily check the campaign status, right: `pub fn check_ + +[44:00] campaign_status`. We're returning a boolean value, so, same as in the demonstration before, `env.storage().instance().get` with, I guess, the target location, for lack of a better term, and then we're using the unwrap_or method: if there's no value at the IsActive key, we're going to return false by default. All right, so let's go ahead and take a look at the deposit and withdraw functions. I just want to make sure that we are in fact using the is_active variable. Doesn't look like we are; okay, actually we are, here. All right, so let's go ahead and dive right into it. So what does this function do?
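Pulled together, the initialize and campaign-control logic described above might look like the following sketch. It assumes the surrounding `#[contractimpl]` block, the `DataKey` enum from earlier, and a `token::create_contract` helper whose exact signature is an assumption; the share token's decimals, name, and symbol are made-up values:

```rust
pub fn initialize(env: Env, admin: Address, token_wasm_hash: BytesN<32>, token: Address) {
    // Record the admin address in instance storage.
    env.storage().instance().set(&DataKey::Admin, &admin);

    // Deploy a fresh share-token contract from the uploaded WASM hash,
    // then initialize it with this staking contract as its admin.
    let share_contract = token::create_contract(&env, token_wasm_hash, &token);
    token::Client::new(&env, &share_contract).initialize(
        &env.current_contract_address(),
        &7u32,                                  // decimals (illustrative)
        &String::from_str(&env, "Share Token"), // name (illustrative)
        &String::from_str(&env, "SHR"),         // symbol (illustrative)
    );

    // Record the deposit token and the new share token, and flip the
    // Initialized flag last, as described above.
    env.storage().instance().set(&DataKey::Token, &token);
    env.storage().instance().set(&DataKey::ShareToken, &share_contract);
    env.storage().instance().set(&DataKey::Initialized, &true);
}

pub fn start_campaign(env: Env, admin: Address) {
    // The transaction must be signed by `admin`, or the host aborts the call.
    admin.require_auth();

    // Read the stored admin, falling back to the contract's own address.
    let current_admin: Address = env
        .storage()
        .instance()
        .get(&DataKey::Admin)
        .unwrap_or(env.current_contract_address());
    if admin != current_admin {
        panic!("caller is not the admin");
    }

    env.storage().instance().set(&DataKey::IsActive, &true);
}

pub fn check_campaign_status(env: Env) -> bool {
    // No stored value means the campaign has never been started.
    env.storage().instance().get(&DataKey::IsActive).unwrap_or(false)
}
```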
This deposit function will actually record a deposit made by a contributor, if the staking campaign is active. So it takes in four arguments: as always, the execution environment of the contract; the contributor, the address of the person making the deposit; the + +[45:00] token, the address of the token to deposit; and, obviously, the amount, so the contribution in tokens. Here you see `contributor.require_auth()`, meaning that, again, only the wallet making the call is allowed to make the call. I can't call deposit and then pass in another wallet's address; it has to be the wallet that I am making the call from, that I am clicking the button with, all right. So let's go ahead and move forward. Okay: `let is_active =`, and we're going to define this value as a boolean by calling the function check_campaign_status, right. So we get that return value and assign it to is_active. We're going to say: if is_active is not true, we're going to panic and return this message: campaign is inactive. Long story short: if the campaign is not active, you can't make a deposit. All right, moving forward. We + +[46:00] have `token::Client::new`, all right. So we're going to put the interface over the token address, right; in this case we're using the Soroban smart contract address, and we're putting the interface over it and calling the transfer method. So we're calling transfer on the XLM token address, at least according to the test here, and we're going to transfer from the contributor, from the wallet making the call, over to the current contract address, so the staking contract, and we're going to transfer the amount specified in the call here. Next we're going to mint the share token to the contributor. So we have `let share_token = get_share_token(...)`. Remember, in the initialize function we deployed a new token called the share token, and then we recorded the address of that + +[47:00] share token to the smart contract storage. So here, in this call get_share_token, we're able to retrieve the address of the share token. We're creating a new client, or interface, to lay over the share token, right? We're saying mint, right: we're telling the share token to mint an amount of share tokens to the contributor. And then here we set the contribution, right. So there's a function in the smart contract called set_contribution that manages the value for the contributor's key according to the address. So, set_contribution: we're getting the address of the contributor and then setting the amount to that key, all right. And then, similarly, if we go to the withdraw function, it works exactly the + +[48:00] same, but in reverse. So we say `pub fn withdraw`, with the contributor address, the recipient and the token. So you see `contributor.require_auth()`. Again, it's super cool here that you can just use require_auth, the authorization framework right in the smart contract SDK, to get this dynamic authorization for things like managing different calls from addresses. So again, you see `contributor.require_auth()`, meaning that only the wallet making the call can make this call; it can't be another address, right, you must pass in the address that you're calling from. is_active: again, we're making a call to a function that exists within the smart contract and returns a true or false value.
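Before following the withdraw path, here is a condensed sketch of that deposit flow. The helper names (`get_share_token`, `set_contribution`) follow the walkthrough, but their signatures are assumptions, as is the panic message:

```rust
pub fn deposit(env: Env, contributor: Address, token: Address, amount: i128) {
    // Only the wallet that signed the transaction may deposit for itself.
    contributor.require_auth();

    if !Self::check_campaign_status(env.clone()) {
        panic!("campaign is inactive");
    }

    // Cross-contract call: pull the deposit token from the contributor
    // into the staking contract's own balance.
    token::Client::new(&env, &token).transfer(
        &contributor,
        &env.current_contract_address(),
        &amount,
    );

    // Mint the same amount of share tokens back to the contributor.
    let share_token = get_share_token(&env);
    token::Client::new(&env, &share_token).mint(&contributor, &amount);

    // Record the contribution under the contributor's storage key.
    set_contribution(&env, &contributor, amount);
}
```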
So, back to withdraw and that boolean value: if is_active does not return true, then the withdrawal will be null, right, it will not go through. So + +[49:00] again, the campaign must be active to make a withdrawal. We should probably change that, all right, cool. So we're getting a couple of values here. `let contribution = Self::get_user_contribution(...)`: this is going to return an amount, and we'll dive into how that comes into play a little bit later. Again, `token::Client::new`, laying that interface over the token address; so this time it's going to be XLM, at least according to the test. And then we're going to say: from the current contract address, from the staking address, we're going to transfer XLM over to the recipient, and the amount will be the contribution amount that is defined here by get_user_contribution. Next, we're going to keep moving forward: `let share_token = get_share_token(...)`. So we're returning an address value here and, again, laying that interface over the + +[50:00] share token address and calling the burn method. The contributor is, I guess, the wallet it's going to target, and the amount that it's going to burn is the contribution amount. So, again, the total contribution is being defined here, and then you see the burn happening here, so the address of the contributor burning their entire contribution. And, last but not least, you see set_contribution: we're going to take in the contributor's address and set the value of that Contributions key to zero. All right. So that's a quick rundown, and a couple of cool things happened during this time. So again, we have our data keys and smart contract storage, right: DataKey here is a good place to keep all your keys. All these keys have values. How do you manage these values? Well, you use the set method, right; that's the contract storage. So + +[51:00] to set a value to a key, you go `env.storage().instance().set`; to get a value from a key, you go `env.storage().instance().get`. So there's some data type and contract storage action happening there. And then one cool thing that I'd like to point out is the cross-contract calls. When we were looking at the deposit function, one thing that we may have looked over is that we're actually making a cross-contract call. So you see `token::Client::new`. All this means is that we're using the token contract, right: we're using this token module to access functions at another address outside of the staking contract. So `token::Client::new` lays the interface over the external address, which is the token address, and then + +[52:00] we're calling the transfer function on that token address, which is defined by this import here. So that might be a lot for some, but this is a cross-contract call, and it's made super easy by being able to use this contract import functionality, whereby you can actually import WASM files, so compiled Rust smart contract files, and call the functions from these compiled contracts on other contract addresses. And it's all super compact and very, I guess, malleable, for lack of a better term, all right. So let's go ahead and actually dive into some tests here. I'll just go ahead and quickly go into the staking directory, and we'll run `cargo test`. All right, there wasn't very much action here, but we have one huge test + +[53:00] that goes over all of the things that we just went over.
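The import that makes those cross-contract calls possible is a small module along these lines; the WASM file path here is hypothetical:

```rust
// Importing a compiled contract's WASM generates a typed client
// (`token::Client`) for calling into that contract from this one.
mod token {
    soroban_sdk::contractimport!(file = "../soroban_token_contract.wasm");
}
```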
Let's go ahead and dive into that test here. You see the test, test_contribute. We're letting env be defined as a simulated environment; the env here is just going to be a default environment being simulated. Here we're saying all authorizations are accepted, so users are able to call the different functions. When it comes to the authorization framework that we have used here, this is going to make it easier for us to get around any blocks that may occur. All right. So we're generating a couple of new addresses, some mock addresses if you will, by using the generate method: `Address::generate`. We're using two, so one for the admin and one for the user. We're creating a token contract: `token1 = create_token_contract(...)`, right. So + +[54:00] create_token_contract: we won't dive into the ins and outs of this just yet, but essentially it's going to create a mock token, and we're going to be able to use the functions from the token.rs file on this token contract. So you see, we're taking in one argument, which is the admin address, and we're going to return a token client. So we're going to register a default asset contract available from the SDK, and we're going to set the admin as the admin address, all right. So let's go ahead and keep moving forward. `contract_id = env.register_contract(...)`: we're going to pretty much just upload the staking contract here using this command. Then client: we're going to define a new client, so a new interface for the staking contract at + +[55:00] this contract ID. And again, mind you, this is all just setting up the environment, all right. So you see here `token1.mint`; this is some setup where we're just minting to the user, kind of like pre-funding their wallet to interact with the contract. Assert that the token balance for the user is what we just minted, so a thousand here. You see initialize, so `client.initialize`. When you see the word client, it just means that you're interacting with the staking contract; client.initialize could read as staking_contract.initialize. Here you see the admin, the token WASM hash that we're using, and then token1's address. So let's go ahead and dive into this install_token_wasm function. You see that `fn install_token_wasm` is going to return a bytes value, and what + +[56:00] we're doing is importing the Soroban token contract that we have pre-compiled here, and we're going to upload that to the contained, simulated blockchain that we're working with in the test environment. But the main thing to focus on here is that we're importing the token file and we're able to use different functions from it thanks to this install_token_wasm function. So again, this returns a BytesN<32> value, and that's what we're going to use within the test, all right. So `client.initialize`: we're passing in the token WASM hash value and then the token address. Right, then we're calling the function start_campaign. So this is going to set the IsActive value to true, and again, we can't make deposits or withdrawals unless that IsActive value is true. Moving forward: `client.deposit`.
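The test scaffolding just described, as a sketch; it assumes the soroban-sdk `testutils` feature, and the contract and client type names are illustrative:

```rust
#![cfg(test)]
use soroban_sdk::{testutils::Address as _, Address, Env};

#[test]
fn test_contribute() {
    // A default, fully simulated environment for the test run.
    let env = Env::default();
    // Auto-approve every require_auth() so test calls aren't blocked.
    env.mock_all_auths();

    // Mock addresses: one admin, one contributing user.
    let admin = Address::generate(&env);
    let user = Address::generate(&env);

    // Register the staking contract on the simulated ledger and wrap it
    // in its generated client for typed calls.
    let contract_id = env.register_contract(None, StakingContract);
    let client = StakingContractClient::new(&env, &contract_id);

    // ...mint test tokens to `user`, call initialize and start_campaign,
    // deposit, and assert the balances, as described above (elided)...
    let _ = (admin, user, client); // silence unused warnings in this sketch
}
```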
So, with `client.deposit`, we're taking in a + +[57:00] deposit from a user: which token they're depositing and the amount that they are depositing. We're going to assert that the user contribution of the user that just deposited matches the total deposit that they made, and we're going to assert that the total share token balance of that user matches the initial deposit, as it all should right now. And then we're going to go ahead and test the withdraw function, making sure that the user, after they make the withdrawal, has zero contributions left within the contract. So again, just as an example, when we run that test, everything is okay and all of this logic passes the way it should, all right. So that's about it for me. Again, if you want to check out the DeFi contracts portion, I'll go ahead and go + +[58:00] through the links one more time. So we have the Okashi link here, if you want to check out the set_title smart contract in an online IDE kind of environment. That contract should be built out, and you should be able to just quickly hit compile and start testing out all of the functions that live there. Again, this setup slide, this QR code here, is going to take you to the setup part of the getting started section of the docs. You're just going to run three simple commands to get started with the Stellar CLI. If you want to check out some of the more advanced smart contracts that we just went through, including more advanced data storage and cross-contract calls, as well as some more examples of getters and setters, you can go ahead and check out this QR code here, which will take you to the Consensus hackathon repo, and you should see a couple of directories, namely staking and data- + +[59:00] types. All right. So, in conclusion, I always tell developers to read the docs, as you can never go wrong with that. Feel free to scan this QR code here, and it will take you right to the `Stellar.org/developers` landing page, where you'll see all the good developer docs that you will need. And, last but not least, once again, my name is Julian Martinez, Senior Developer Advocate here at SDF. It's been an absolute pleasure. I love talking about this stuff; you know, I really love having developer workshops and tutorials, happy to do them anytime, and I really hope that you guys all learned something today. If you have any questions, then please do hop in the developer Discord, the Stellar Discord; it should be the soroban-dev or dev-help channel, or any one of those channels in general. Just join the Discord, and you can also ping me at thegodhand + +[01:00:00] underscore. Again, that's "thegodhand" and then an underscore, if you have any questions. But yeah, thanks again so much for your time, and I hope to see you all soon. Take care. + +
diff --git a/meeting-notes/2024-07-25.mdx b/meetings/2024-07-25.mdx similarity index 73% rename from meeting-notes/2024-07-25.mdx rename to meetings/2024-07-25.mdx index 86542b3711..743ecc6edd 100644 --- a/meeting-notes/2024-07-25.mdx +++ b/meetings/2024-07-25.mdx @@ -1,21 +1,23 @@ --- -title: "2024-07-25" -authors: naman -tags: [protocol] +title: "Soroban Constructor Proposal" +description: "This overview highlights Soroban smart contracts, smart contract development, and SDK updates." +authors: + - dmytro-kozhevin + - kalepail + - leigh-mcculloch + - naman-kumar +tags: [developer, CAP-58] --- - +import DriveVideo from "@site/src/components/DriveVideo"; + + A Core Dev, Dima, discussed the proposal to add constructor support to Soroban, Stellar's smart contract system. Relevant links: [Draft CAP](https://github.com/stellar/stellar-protocol/blob/50dde0611440d6dc562a33462e6ba5f1504b2753/core/cap-0058.md), [Ongoing discussion](https://github.com/stellar/stellar-protocol/discussions/1501), [Motivating Discord Thread](https://discord.com/channels/897514728459468821/1067534037310255225) -Key points discussed: +### Key Topics 1. The proposal improves the usability of Soroban for contract and SDK developers. A constructor guarantees contract initialization, thus reducing the overhead contract code that's usually added to ensure initialization. 2. There was general agreement on the proposal; questions were primarily implementation-focused: whether constructors should handle arguments, what should happen with upgrades, backwards compatibility with contract creation functions, and behavior on WASM update. diff --git a/meetings/2024-07-31.mdx b/meetings/2024-07-31.mdx new file mode 100644 index 0000000000..dd61d6005e --- /dev/null +++ b/meetings/2024-07-31.mdx @@ -0,0 +1,177 @@ +--- +title: "The Future of dApps: Building on Stellar" +description: "A practical walkthrough of building full-stack dApps on Stellar with Soroban—scaffolding an Astro frontend, generating TypeScript bindings, connecting Freighter, and deploying a crowdfunding-style example that mints and deposits tokens on testnet while highlighting Soroban RPC, auth, and fee mechanics." +authors: julian-martinez +tags: [tutorial] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session focuses on the end-to-end developer experience for Soroban dApps: starting from a minimal “hello world” contract call in an Astro app, then layering in generated TypeScript bindings so the frontend can interact with contracts through strongly-typed client methods. Julian Martinez walks through an ad hoc, code-first session on building full-stack dApps with Soroban. + +It then shifts to a more complete crowdfunding-style example dApp, unpacking the contract design (storage keys, campaign state, deposits/withdrawals, and token transfers) and demonstrating a full flow on testnet—from minting a demo token to depositing into a campaign—while touching on Soroban RPC configuration, auth requirements, and typical deployment considerations. Live demos cover scaffolding a project, calling contracts from a frontend, and wiring wallet signing with Freighter. 
+ +### Key Topics + +- Project scaffolding with `stellar contract init` using an Astro template to bootstrap a Soroban-enabled frontend +- Local setup essentials: Rust toolchain, compiling contracts to WASM, and installing the Stellar CLI +- Environment configuration for testnet, including Soroban RPC endpoint and network passphrase values +- Using the Stellar CLI to: + - build contracts (`stellar contract build`) + - deploy contracts (`stellar contract deploy`) + - generate TypeScript bindings (`stellar contract bindings typescript`) +- How generated bindings behave like on-demand npm packages: + - embed network metadata (passphrase, contract ID) + - expose contract methods with typed arguments for easy frontend calls +- Frontend read-only contract calls (simple `hello(...)`) to validate wiring before signing transactions +- Adding wallet connectivity via the Freighter API: + - gating access with `isAllowed()` / `setAllowed()` + - retrieving the wallet address via `getUserInfo()` + - displaying the connected public key in the UI +- Example dApp deep dive (crowdfunding contract): + - storage model (recipient, deadline, target amount, token address, per-user deposits) + - campaign state machine (`Running`, `Success`, `Expired`) + - authorization patterns like `require_auth()` to ensure callers match the signing address + - cross-contract calls through a token client interface for transfers + - deposit flow: user → contract address, then emit events and track balances + - withdraw flow: contract → recipient, reset per-user storage, emit events +- Deployment and signing considerations: + - generating/funding identities via CLI scripting + - pinning a CLI version locally in a repo for reproducible builds + - browser wallet requirements (HTTPS) prompting deployment to a hosted environment for signing +- Demonstrated user flow on testnet: + - connect wallet + - mint demo tokens + - deposit tokens into the campaign and observe state updates + +### Resources + +- [`stellar/soroban-astro-template`](https://github.com/stellar/soroban-astro-template) +- [`stellar/soroban-dex-example-dapp`](https://github.com/stellar/soroban-dex-example-dapp) + +
  Video Transcript + +[00:00] Hello and welcome everybody. My name is Julian Martinez, Senior Developer Advocate and common man here at SDF, and I'm super excited to get today's session kicked off, where we're going to dive into the basics of building full-stack dApps using Stellar smart contracts. So this is going to be kind of a freestyle, ad hoc session. I don't have any slides or anything, but we will be using the getting started docs as well as a common example dApp that you may have seen building stuff in your journey throughout the ecosystem: we're going to be using none other than the Soroban example dApp. All right, so a couple of links here. First things first, the QR code that will take you to the getting started section of the docs. And then, + +[01:00] later, maybe in the second half, when we start signing transactions, we'll transition over to actually cloning the Soroban example dApp, and then we'll dive into the ins and outs of that one. But for now, we'll get started with a very simple front-end read-only function call from our hello world contract. Now let's backtrack a little bit. To get started, you're going to need to install the Rust toolchain, which is going to allow you to use tools like cargo and the other Rust tools available in the toolchain. The second command here is going to tell the compiler to compile the Rust smart contracts into WASM. And the third command is going to install the Stellar CLI. So, a little bit of a refresher from the last session if you haven't already done this: you can actually scan the QR code at the bottom left right here, as I stated earlier, to help guide you along throughout these next 30 minutes to an hour or so. + +[02:00] All right. Now, I'll make you aware that I can't see you guys in the chat, because I am using a third-party app, but I'm going to go ahead and get right into it. So we're actually going to clone a pre-existing project, and for this we're going to run the command `stellar contract init`. We're going to specify the directory here, then we're going to add a space, and then we're going to say `--frontend-template` and `https://github.com/stellar/soroban-astro-template`. Cool, no demo effect today. As you can see, that went into the + +[03:00] directory that I'm working from. All right, so let's see what happened here. We are initializing the project at the root directory. We have a new Cargo.toml file, which actually injects a compatible Soroban SDK into our project. Here we have .env.example, which we're going to need to edit a little bit later. We have our source folder, which has some components in it, right; you should see some things that maybe you recognize, maybe you don't. You have this Card file here, which is basically a cool little front-end component that you'll see a little bit later. The pages folder here holds all the main logic that you'll be seeing a little bit later as well. But yeah, so let's just go ahead and keep working through the tutorial here. So, first things first, we have this .env.example file. Now, you can either copy this over and make a new one, or just rename the existing file to just say .env, and + +[04:00] we're going to change some of the parameters here.
So you see, the public Soroban network passphrase is actually targeting the standalone network. So we're going to erase this value and inject the testnet values here. So, the public Soroban network passphrase, or, excuse me, not the Soroban RPC URL; we won't be injecting the Soroban URL here, but the passphrase instead. So we have `Test SDF Network ; September 2015`, the correct passphrase for testnet. Then we're going to go ahead and change our public Soroban RPC URL to the correct endpoint here, which is `https://soroban-testnet.stellar.org`. Okay. And we already have a Soroban account called Alice, and we're going to + +[05:00] just specify the network to be testnet. All right, so let's go ahead and clear this out. All right, so what do we have here? Okay, we're looking at our project so far, and we've also updated our environment file. So what we're going to do next is actually just install some of the dependencies. We're going to do that by running `npm install`, all right. So let's go ahead and take a look at what's happening here. All right, nothing special here; we're just running npm install, installing the dependencies for the project. So we're going to go ahead and run `npm run init` for the next step, and what this is going to do is actually run the initialize.js file. So, while this is loading, + +[06:00] let's just go ahead and take a look at what's happening here. All right, so we're processing the public key, right, and then we're actually going to use that to deploy the smart contract. So we're going to use that as the signer, to pay the fee for deploying the smart contract itself. All right, so we're using the Stellar CLI. We're going to generate a new account, which we defined here, right: Soroban account equals Alice. So we're going to say `stellar keys generate` and then Alice, but we've already done that. So you can think of this function here as re-running the step where we fund an account that we would pay the deployment fees with, but we've already generated that, all right. And then we're going to build the files, right. So what does that mean? We're going to use `stellar contract build`, which is going to compile the + +[07:00] smart contract files, right, again in contract, down to their WASM format, and it's going to store them in the target directory here. Next, it's going to use `stellar contract deploy`: it's going to specify the WASM file, right, and then it's going to deploy that right to testnet. Once it deploys the contract to testnet, it's going to record the contract ID, and then it's going to build the TypeScript bindings. So you see here, `stellar contract bindings typescript` with the contract ID, which is going to be the previously recorded ID. And let me see if I can get to the function itself. So you see, the bindings command takes the contract ID we recorded, and then it's going to store the output in the directory name which we've defined here in + +[08:00] packages. And just as a quick overview, you see this bind-all function, and I think there's also a deploy-all, yep. So you can have multiple projects within this directory. For example, you can import the incrementer contract, or incrementer project, here.
It'll actually compile that contract, deploy it as well, and create the TypeScript bindings for it. But for this, we're going to just use the hello world contract, all right. So let's go ahead and take a look at the source directory here, and then let's take a look at the Astro page. Cool. So let's just go down from top to bottom. We see import Card, right, and this is just a simple card component that holds all of the data; it just makes it easier to contain some of the data that we're trying to illustrate. Here we see: + +[09:00] import helloWorld from contracts/hello_world. Now, this part is pretty important, because you see here that we're creating a client, and we've defined this client using certain parameters. So what exactly does that mean? Well, when we create a client, you can think of this as creating a smart contract object. When we created the TypeScript bindings, we actually compiled that data into its own package, right. So what does that mean? Let's just go here into the package.json file. You see workspaces: packages, right. And let's go back into the initialize.js file. If you see here, the bindings output directory name is just going to be the name of the contract under packages, and then there's an alias. So you see here that we're including the packages + +[10:00] directory. And when you call the command `stellar contract bindings`, you're creating on-demand npm packages for the smart contract itself. So let's go back into index: contracts, hello world, imported as the client from hello-world. Now let's dive into hello-world itself. So you see, here in the packages directory, under hello_world, in the dist directory, let's just go down line by line and see exactly what's happening. You see some imports coming from the Stellar SDK. Well, that's because you actually create your own node module set for the smart contract here, and it imports the Stellar SDK node module, which, you can see, is clearly titled @stellar/stellar- + +[11:00] sdk. A cool thing here is that it actually records the network data. So if you're using testnet, as a point of data for your syntax here, you can actually reference the network passphrase and the contract ID, which we do when we're creating the client object. And then, moving forward, you see that we're defining all the functions here along with their arguments. So you see, hello only contains one argument, which is `to`, and it's defined as a string. So this makes it really easy to create front-end components that leverage the methods on these smart contracts themselves. The bindings command that comes with the Stellar CLI makes it easy to carry the smart contracts over from their Rust version to a front-end version. After it converts the code to the JavaScript or TypeScript + +[12:00] output, you also have these supporting SDKs to communicate with the Stellar blockchain, all abstracted away from your development process, saving you a bunch of cycles. Long story short, it's a really good tool. I love it. I love using the Stellar smart contract bindings personally, and yeah, it just really saves a lot of time. So let's do a recap of what happened. On line four, we're importing helloWorld from contracts/hello_world, and here you can see that we're using the client to define hello world. Right, let's dive into the hello-world directory a little bit.
As you can see here, again, we have our own node module for the smart contract, and it includes the Stellar SDK, which is going to help communicate with the Stellar blockchain. It also contains information about the network and, + +[13:00] last but not least, it contains all the methods included in the hello world smart contract. All right, so let's go ahead and see how we're going to leverage this. So you see `const result = await helloWorld.hello(...)`. All right, simple enough: we're awaiting a function here, and the method is hello. We're going to define the message as the `to` argument. All right, so we've already run the initialize function, so let's go ahead and run `npm run dev`. I think this is going to run the initialize function again. All right, 4, 3, 2, 1. All right, + +[14:00] let me go ahead and drag this on over here. Make sure, okay, cool, so you guys should be able to see the screen here. All right, so we see: Hello you. "To get started, open the directory src/pages and tweak the welcome to Astro message above." I don't think that really applies to us. So, `const result = await helloWorld.hello(...)`: hello to you. Let's say hello and "World!". An error occurred: must be a host value. So I believe it was the exclamation mark. All right, cool. So we have hello world. We're actually able to change that + +[15:00] because we're not making a call to the backend; we're just using simple JavaScript manipulation, and we're able to see the changes immediately, thanks to Astro. I don't know if this is exactly exclusive to Astro, but this is a way that you can easily test the outputs of your functions. For example, in this case we changed the hello call's `to` argument from world; we can even say Stellar, right. So it's a good way to test your functions on the front end: very lightweight, and it doesn't involve any communication with the Stellar blockchain, right? So that's a quick intro, but let's take it a step further, because that's not all we want to do here, right? We actually want to take this a step further, first by seeing if we can read some wallet data. So I'm + +[16:00] going to open up Freighter. All right, so you can see I'm connected to testnet, and you can see my address is G...75ZKTV. Okay, cool. So let's go ahead and see how we can connect our wallet data to the front end here, and we're going to use Freighter. So, first things first, we're actually going to install the dependency; I don't think we have it yet. We're going to say `npm install @stellar/freighter-api`. And here we're going to create a component. We're going to call this connect, and, oops, connect, and, one last time, ConnectFreighter, and we're going to say .astro. And for this we're going to create a couple of things: we're going to create a div, and + +[17:00] then we're going to give it a style, and then we're going to give it a script. So we're going to say div, and we're going to say `id="freighter-wrap"`, and we're going to say, all right, stick with me y'all, a class, and we're going to say equals, let's see if Copilot saves me here, and we're going to give it an `aria-live` attribute. Cool. We're going to keep that div open; actually, we're not going to close that out yet. And here we're going to add another div.
We're going to say div class, and we're going to say + +[18:00] "ellipsis" here. We're going to close this out. Then we're going to say button; yeah, let's close that out. I'm going to say `data-connect`, and aria-controls: we're going to say `aria-controls="freighter-wrap"`, and it should say Connect on the button. This needs to be in quotes. Should be good so far, cool. So we're creating some divs here, and we're getting set up with our front-end environment, if you will, the little area that will contain the wallet information. + +[19:00] So let's go ahead and give this a style real quick. So let's say style, and we're going to want a wrap rule, right: going to say `.wrap`, and we're going to say text-align, and, sure, why not give it a center text-align, that's cool. We're going to close this out here, and we need to bring this down a little bit, okay. And then we're going to give the ellipsis some values here. We're going to edit the line-height; the line-height is going to be about 2.7. And here we're going to say the margin is going to be auto. + +[20:00] And here we're going to say the max-width is going to be about 12em. Here we're going to say overflow is hidden. Here we're going to say text-overflow, and we're going to say text-align, text-align equals center. And, last but not least, we're going to say white-space: nowrap, all right. So now we've got our style defined. Let's keep on moving forward; thanks for sticking with me so far. Let's go ahead and define this script, all right. So we're going to get into some of the logic behind using the Freighter API. So, first things first, we're going to go and define the imports that we need. So we're going to say import, and we're going to say we need isAllowed, and we need get + +[21:00] UserInfo. All right, let's go ahead and dive into what this one actually means. So here is where you're going to get the public key from the Freighter API. You actually have to have Freighter installed on your local machine, in your browser, for this to work, and you can do that by visiting the Freighter website; there should be some download links to quickly add it to Chrome. But here is the function that's going to get the public key, and we're going to be using this function to show the public key on the front end of the dApp, all right. So, now that we have that imported, let's go ahead and keep moving forward. So we have `const wrap = document.` getElementById? No, we're just going to say querySelector. We're going to say + +[22:00] `querySelector("#freighter-wrap")`; we actually want the hashtag to be right there, okay, cool. We're going to say `const button` with `querySelector("[data-connect]")`. Here we're going to say `const ellipsis =`; we're going to say document for this as well, `document.querySelector("#freighter-wrap .ellipsis")`. Cool, all right. So we have our wrap defined, our ellipsis defined and our button defined. Let's go ahead and get into defining some of these functions. Here we're going to say `async function getPk()`. It's not going to take in any + +[23:00] arguments. Let's go ahead and see what's here for us from Copilot. This is definitely not right, but very close. So we're going to say const, and we're going to say the public key awaits getUserInfo, too easy, and we're going to return the public key. All right.
Now we're going to say async function, and we're going to say setLoggedIn, okay. And we're going to take in one argument: that's going to be the public key, and the public key is going to be a string, okay. All right: setLoggedIn, with publicKey of the type string. Now we're going to define some of the logic here. We're going to say the innerHTML is going to be, we're going to say, "Signed in as", + +[24:00] right, why not. We're going to say signed in with, and we're going to define this value here as the public key value. Is that not correct? Signed in with, and then, oh, I need to close this out. Too easy. I was using the wrong kind of quotes; there we go, cool. Wrap, okay. And then here we're going to say `ellipsis.title`, and we're going to say equals publicKey, cool. Ellipsis html, ellipsis innerHTML, ellip, oops, ellipsis, set title; do we not have this? Ellipsus, ellip, + +[25:00] ellipsus; sorry y'all, quick spell check, going to fix some stuff here. Ellipsis, O. All right, what do we got here? Div class; why is this failing? Ellipsis title: publicKey. All right, let's just move forward, because I did have some errors earlier that I was just kind of working through. These errors did not affect the output, so let's just keep working through it and see what happens. So, as you can see here, Copilot has created a handy-dandy function to complete the script. We're going to say: if isAllowed is true, we're going to set + +[26:00] the publicKey constant; we're going to await that, we're going to get the public key there. We're going to say: if there's a public key, we're going to call setLoggedIn with the public key value, or else we're going to set the wrap's innerHTML, and we're going to say: Freighter is locked. So let's just go ahead and do this here. If publicKey: setLoggedIn. And here you can just kind of go in with Astro and type in the ternary statement itself. You say else: `wrap.innerHTML`, and, sure, we'll say "error getting public key", why not. Okay: if publicKey, then we're going to say setLoggedIn(publicKey), html, okay. + +[27:00] And then we're going to say else here. We're going to say, yeah, so we have some good legs to stand on here. We have `button.addEventListener`, right, click, async function; that's true, this is all good. I'll just get rid of this. Maybe that wasn't as good as I thought. button.addEventListener, and let's say we're going to disable that button, `button.disabled = true`; I kind of want that button to disappear real quick. Then we want to say `await setAllowed()`. Then we want the publicKey constant to be the return value of the getPk function that we defined earlier, and we're going to say setLoggedIn(publicKey), all right. So we should be all cleaned up here. + +[28:00] All right. And again, I am having some issues on my side with the errors and things like that, but let's just go ahead and see if this runs anyway, all right. So let's go ahead and run dev one more time. Again, thank you all so much for being here with me, and thanks for hanging out. I know these things kind of run on sometimes; I never expect them to go as long as they do, but they always exceed my expectations. And let's see, let's see what happened here. All right, let's go 4, 3, 2, 1. Doesn't look like that was implemented, because we never went to the index page, right? So next up, we have to actually import our ConnectFreighter + +[29:00] button.
So let's say import, and we'll say import ConnectFreighter from the components directory: ConnectFreighter.astro. And we're going to just kind of plug this in somewhere. I don't know, we'll plug it in right here. We'll say ConnectFreighter. Very cool. Greeting, all right. So you should see "Signed in as GA75F..." Again, let's go ahead and see exactly what's happening here. ConnectFreighter: again, you see, on the script side we're importing isAllowed, setAllowed, and getUserInfo. The way that we're illustrating, or showing, the public key is that we're saying this async function setLoggedIn, we're saying the innerHTML is going to be the public key, right? And all the public key is, is the result of getUserInfo. All right. So that's a little bit about defining the functions, using Astro to kind of demonstrate
+
+[30:00] some calls, right, without calling the blockchain, and then also integrating Freighter, Stellar's kind of primary wallet. Well, Stellar doesn't have a primary wallet, so to speak, but the in-house developed wallet is Freighter. We went over how to connect that to the frontend itself, and we see that being reflected here. All right. So you guys have been with me for about 30 minutes, but we're going to keep it going. We're going to keep the session going; as I said, every time I do one of these it kind of exceeds my expectations. So we're going to switch gears a little bit. We're actually going to go to a more advanced smart contract undertaking, if you will, and what we're going to do is I'm going to share with you the Soroban, or Stellar, excuse me,
+
+[31:00] example dApp. All right. And I'll just do a quick little note: a bash code block. All right. So if you want to clone the example dApp, all you have to do is run git clone, and you're going to say: this guy right here. I don't have the link handy, but you can clone the example dApp by going to this link right here. And I guess I could show you guys what's going on here. All right, here's the Soroban example dApp. You can see that it has a
+
+[32:00] readme, which needs to be updated, actually, but it's a full-stack boilerplate dApp, and we aim for you guys to use this to create your own crowdfund applications. It's got a bunch of use cases particular to public goods funding, and we'll see how we can kind of tweak this to aim it toward a project outside of the original use case, which is to raise money for an art distribution, and we'll see what we can do here. So, without further ado, let's just go ahead and dive into it. Again, if you're interested in following along, you can run the command git clone and then plug in the link here, and if you want to just check out the repo, you can find it in the Stellar org: the soroban-example-dapp repo. All right, now this is one of my favorite dApps, because it's one of the dApps that I learned very early on, so
+
+[33:00] I hope my enthusiasm spills over and through the airwaves, so to speak. And, yeah, I hope you guys learn something today. So let's go ahead and dive right in with the smart contract. First things first, you see some attributes you may be familiar with from the last session: contract; contractimpl, which is a new one, maybe; token; Address; and IntoVal, and then Val. We'll dive into these as we go through the contract, but I may gloss over a few things for the sake of time.
Here you see the public enumeration DataKey, and this is where you're going to hold the bulk of the keys for the contract storage. Remember from the last session: each key has a value, and each value is defined as a certain type. Deadline, Recipient, Started, Target, Token, User (an address), and RecipientClaimed are our keys, and we'll go ahead and see what
+
+[34:00] types belong to these values a little bit later. Moving on, you see that we have the state of the smart contract here, and it's just going to be defined by three values: Running, Success, and Expired. Now let me do a quick backtrack. I know I said that this example dApp was for a crowdfunding application, and it is. So some of the components behind the smart contract are that it has a deadline for the campaign to run; it has a recipient to receive the deposits for the crowdfund campaign; there's the token that it's going to receive for contributions; it's going to keep track of the users that contributed; and then it's going to hold some boolean values for whether or not the target has been reached, whether the recipient claimed, et cetera. And this enumeration defines the state of the campaign: it can be running, it can be successful, and if it didn't reach the target, then it will be expired.
+
+[35:00] All right. So we have our getter functions here, also known as utility functions. They're just retrieving certain data. Here you see that we're retrieving data from the ledger, getting the timestamp, and here you see that we're just retrieving the values for some of the keys. All right, so these are our getter functions, and we also have our setter functions, which are mostly defined here but are actually called in the initialize function. So let's go ahead and take a look at the initialize function. You see: public function initialize, passing in the recipient value, which is an Address; the deadline value, which is a u64; the target amount, which is going to be the amount that you want to get into the campaign via deposits; and the token, which is the address of the token that's being
+
+[36:00] transferred from the depositor's wallet to the crowdfund contract. All right, so let's go ahead and just step right through. First things first, we're going to assert that the contract storage does not already have a recipient, because if it does, that would tell the user that this contract is already initialized, and it can't be initialized twice. Then we're just going through our setters here. You see env.storage().instance().set, and we're setting values for all of those keys: recipient, claimed, started, deadline, target, and token. All of these keys are being given the values that we're passing in through the initialize function. Cool, good to go there. And this smart contract actually consists of two other functions that are kind of the crux of the functionality
+
+[37:00] behind this entire contract's success, really. So this is the deposit function; it takes in two arguments: user and amount. Now I want to quickly highlight that on line 218 you see user.require_auth(). One cool thing about the Soroban Rust dialect is that we actually have an authorization framework implemented in the Soroban SDK. So what does this mean? This means that the user argument has to be the address that is actually making the call.
If I am user A, I can't pass in address B for this call, and this require_auth method ensures that the wallet being used to make the call is actually the one authorizing it; it cannot be another wallet's address. All right. So it's a simple way to assert that the wallet making the call is the same as the user
+
+[38:00] address. All right. Then you see the assertion here: the amount has to be higher than zero. Too easy. We're doing some other checks here, verifying that the campaign is still running: if the sale isn't running, you can't make a deposit. All right. So next up, let's take a look at some more interesting logic here. We're getting the user's deposit balance, et cetera, but let's take a look at line 230. You see: let client equal token client new. Sorry, where was I? Okay: let client = token::Client::new, passing the environment and then the token ID. All right. So this is cool, because you see token, right? And this is actually a crate that is implemented in the
+
+[39:00] Soroban SDK and allows users to leverage this functionality as an interface. So you see token: that's the SDK; that's the logic behind the Soroban, or Stellar, token contract. You see this Client here, which allows you to create an interface to lay over a contract address; new, obviously, creates a new one; and we're laying the token interface over the token ID. So we're going to call different functions via this token interface at a different address, meaning we're actually going to be making a cross-contract call. Cool. Now you see client.transfer, right? And we're transferring from the user to the current contract address, with the amount. Next you see the contract balance: get the balance. And then we're emitting a pledged-amount-changed event here. All right, so that's simple enough. That's the deposit
+
+[40:00] function. Now let's go ahead and move into the withdraw function. Again, we're getting the state and the recipient, doing some checks here, and saying: hey, if the campaign is still running, you cannot make a withdrawal. Again, we're defining the token client here, calling transfer on the token, transferring from the crowdfund contract address over to the recipient address. And, moving forward, we're going to update the smart contract storage and set the user-deposited amounts to zero; all deposits across the smart contract storage should be reset back to zero, and events will be emitted to reflect the changes that were just made. All right, cool. So let's go ahead and
+
+[41:00] see how this works in action. One cool thing about this repo is that it comes set up with some really easy-to-follow directions. Let's go ahead and dive into the package.json file. Here you can see that there is a reset script that's going to run npm run clean and npm run setup. If we look at npm run setup, all that's going to do is run the initialize.sh script. So I'm going to go ahead and dive into the initialize.sh shell script and just kind of break down what's going on here. First things first, we're going to define a new PATH entry for a binary. That's because we're actually going to be installing the Stellar CLI as a binary within the project directory itself.
That way you can leverage a pinned version of the Stellar CLI, to make sure that the
+
+[42:00] tutorial, if you will, the deployment of all the smart contracts and the frontend of the dApp, does not fail; it must be pinned to a version that is compatible with the Soroban SDK. All right. So you see, in setup we are running the initialize.sh file. Again, let's go ahead and dive back into that. Where are we at? initialize.sh. All right, so that's where we were. Line nine: we're defining the binary file path. That's because we're going to use the Stellar CLI as a binary, right? We'll kind of gloss over this a little bit; just know that it's stationed in the target/bin directory. Cool. And that's going to run cargo install, which is, again, going to install a pinned version of the Stellar CLI. Real quick: we're going to be able to pass in some values when making the call. If you look here, we're saying initialize.sh and then we're defining the network as testnet. The
+
+[43:00] script here makes it super easy by allowing us to type in the value standalone, futurenet, or testnet, and we have preconfigured values for the appropriate RPC host URLs defined in the script. So when you pass in the argument standalone, futurenet, or testnet, it'll quickly plug those values in and define that for you, to save you a little time later. All right, we're defining a network by using the RPC URL and passphrase, and here we're actually going to write some of the deployment data to a folder in the directory called .soroban. All right, so that's all that's happening there. Here on line 87 we're generating a new identity called token-admin by using stellar keys. If the token-admin identity already exists, we're going to skip over this, but if not, we're going to generate a new
+
+[44:00] identity using the keys method, again, stellar keys generate token-admin, and we're going to do that on the test network. All right, so I'm going to start moving through a little bit faster, because we're about 15 minutes out from the hour, so hang in there. Next, we're getting the address of the token-admin identity we just created, through the stellar keys address method, and we're funding it. This is actually redundant, because we already fund it when calling the generate method. Next up, we're deploying the abundance token, which is just a Soroban token; we use it as a mock token for demonstrating the mint and deposit capabilities. So we deploy both the abundance token, our mock Soroban token, and the crowdfund smart contract. This kicks back a crowdfund contract ID, which we then use later to generate
+
+[45:00] TypeScript bindings. All right. So we get some success messages here that kick back the IDs, and then we write those IDs to the .soroban directory. Last but not least, at the very end of the script, we initialize the abundance token: we give it the name "abundance", set the symbol, and set the decimal places, and then we set the admin as the new identity we just generated. Finally, we initialize the crowdfund contract. We set a deadline of 86,400 seconds, so one day from now, and we say stellar contract invoke, and we're calling the initialize function.
So, going through the setter functions again: we're setting the recipient key to the admin address; we're setting the deadline key to the deadline value; the target
+
+[46:00] amount key will be set to, I think this is 10,000, or 1,000 tokens, I believe; and the token is going to be the abundance token that we just deployed. So, I say all that to say this: let's go ahead and run npm run reset, and we'll just see what happens here. By the way, thank you so much for sticking with me thus far. I know these sessions can be quite long, but I'm really hoping that you guys are learning a lot, and, again, I'm really happy that you're sticking with me throughout this journey. Real quick, I kind of wanted to do a recap of the Freighter connection lesson here. If you remember, we implemented Freighter in the following way: we actually used the method getUserInfo, right? So, if you remember, we used getUser
+
+[47:00] Info and we returned the public key. That's exactly what we're doing in this application. Here we go to the wallet data component: you see useAccount and useIsMounted. Now, wallet data is going to use both of these hooks, but if we go back into useAccount, at the end of the day it's using getUserInfo from the Freighter API and then returning these objects. So that's how we're able to use Freighter to return the public key. And let's go ahead and dive into some of the main frontend components that require logic from Freighter to sign transactions. Yeah, let's do that right now. So if we look at the form pledge... I believe form pledge... is this it? Yes. We look at the form
+
+[48:00] pledge component here. This is actually going to contain a button that allows us to mint the abundance token, again, that mock token that we're using; this is how we're going to access the mint function. So you see: onClick async, setSubmitting equals true, const transaction equals await abundance.mint. And again, we're able to do this because when we create TypeScript bindings, we're creating node modules for our smart contracts, right? So the abundance smart contract has the method mint, which we're able to leverage because of the TypeScript bindings. They have the SDKs installed on the back side of the node modules, and they're able to communicate with the Stellar blockchain by kind of creating the calls by hand using JavaScript, all to abstract away development cycles for you and save you
+
+[49:00] time. All right. So we have "to", which is the account, and then the amount, and then we're defining the public key that's returned by Freighter, and this is the key that we're using to sign. All right. And that logic continues with both calls. So let me try to track this down real quick. That was the mint function, and now you should see the deposit function, right? Crowdfund... let's just go ahead and do a quick search. Deposit is not in this one... deposit, transaction, set balance, prop, address, form pledge. We should see a deposit function in here. Yep: so, transaction, crowdfund deposit, and
+
+[50:00] then we're going to pass in the user and the amount, same as before. We're calling the await getPublicKey method to pretty much pass in the signer there, and that's going to be because of Freighter. So we're going to say npm run dev. Let's hope everything works.
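+
+While that dev server starts, here is a rough sketch of what those two calls look like from the frontend, with package names and method shapes assumed from the video's description of the generated bindings rather than taken from the repo:
+
+```typescript
+// A hedged sketch of the mint and deposit calls described above, using
+// contract clients generated as TypeScript bindings. The import names and
+// exact method shapes are assumptions for illustration.
+import { getPublicKey } from "@stellar/freighter-api";
+import * as abundance from "abundance-token"; // assumed bindings package name
+import * as crowdfund from "crowdfund-contract"; // assumed bindings package name
+
+// Mint 100 abundance tokens to the account connected in Freighter.
+async function mintAbundance(): Promise<void> {
+  const to = await getPublicKey();
+  await abundance.mint({ to, amount: BigInt(100) });
+}
+
+// Pledge tokens to the crowdfund. Because the contract calls
+// user.require_auth(), the `user` passed here must be the same account
+// that signs the transaction in Freighter; any other address will fail.
+async function pledge(amount: bigint): Promise<void> {
+  const user = await getPublicKey();
+  await crowdfund.deposit({ user, amount });
+}
+```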
I'm hoping. All right... shared contracts. One more... one second, real quick. Let me go ahead; I actually have to generate the contract bindings. So here we have to run yarn run bindings. Okay. So one quick hiccup here. During the deployment process, when we ran
+
+[51:00] initialize, it actually didn't run the bindings script, just full transparency. We're going to be deploying this to Vercel, and there is a quick issue with the original package.json file that comes with a preinstall command. Now, this preinstall command will actually build the TypeScript bindings for you, which are required for the frontend deployment. I deleted that call, and that's kind of why we're seeing that error here: because there are no TypeScript bindings for the abundance contract. But let's go ahead and run this dev command again and see if that worked. It should work. Okay, that was very close. Good thing the demo effect did not get us today. All right, and you can see my wallet address here. This means that
+
+[52:00] Freighter, or the Freighter API, is hooked into the frontend component, so this is reflecting properly. Let's go ahead and try this mint. Okay, so Freighter actually doesn't allow websites that don't have an SSL certificate to make calls. So what we have to do here is transfer this over to a production type of deployment. So stick with me here. What we're going to do is remove the target directory, rm -rf target, and we're going to use Vercel to deploy the frontend. I already have Vercel on my machine, but if you don't, you'd have to run npm i vercel, or npm i -g vercel to install it globally.
+
+[53:00] Now I'll just quickly run vercel; I have it installed globally. Here we're going to set up and deploy this app. We can say yes. Nope, I don't want to link to an existing project. I'm going to give this a new name: we'll say soroban-example-dapp, and we're going to say 07, yeah, for July. We're going to go ahead and deploy this and set up the project here. All right, we don't want to modify any of these settings, so we'll say no, and we're going to go ahead and wait for this deployment to build and get set up for production. It's going to take a little while.
+
+[54:00] All right, just waiting for a quick sec. I do want to open this up to the side to make sure that there are no problems; I don't think it should have an issue. Make sure Freighter is logged in. All right, we're logged in: G7CKTV. All right, so the deployment is ready. It did not crash, and you can actually access it through this URL here, so I'll be happy to share that. Let's go ahead and connect the wallet. All right, so the first time you connect to a
+
+[55:00] website, Freighter is going to show this popup and say: hey, do you trust this website? I'm going to go ahead and connect it. I did click connect, but it doesn't look like it was updated, so I'll do a quick refresh. You can see my public key being displayed here. I'm going to go ahead and click the "mint 100 abundance tokens" button. All right, and look at that fee: 189. Stellar is about 10 cents right now, and, I mean, you've got to imagine that is such a low fee, and that can really help out startups and just users in general, right? Why charge users an arm and a leg in fees? All right.
So that call went through, and you can see that the balance has been updated to 100 abundance tokens. So that was the mint function. And again, a quick recap, because I'm super proud of this: we went through the mint function earlier.
+
+[56:00] Right, the mint function. We'll say... where are we here? Amount input, pledge... I'm so hyped I can't even remember where we were. Okay: amount input, author info, button, connect button, deposits... whoops. So let's just search for mint. All right, so here we are in the form pledge. Excuse me, everyone; again, I was super excited and couldn't figure out where we were in the code, but this is where we want to be: line 49 in the form pledge file, in the molecules directory. You see const transaction equals await abundance.mint, and that's what we called from the frontend when we clicked the mint button right here. This is all the logic that happened: we passed in the account, which was determined by connecting with Freighter, and the amount,
+
+[57:00] which is predetermined within the code itself; we're just saying the amount is going to be 100. All right. So now that we have 100 abundance tokens in our balance, let's go ahead and back this project with 100 abundance tokens. Let's go ahead and review the call, and then we see here that the contract is receiving the tokens, the sender is going to be the address, or the wallet, that we're using, and we're going to be sending 100 abundance tokens. So we'll go ahead and approve and continue, we'll sign the transaction, and we'll hope it goes through. Boom: we've made a 100 abundance token deposit to the campaign, and we have concluded
+
+[58:00] the campaign itself, because our goal was 100 abundance tokens. And, as you can see, this was updated in real time because of the subscription hook that's in the boilerplate itself. All right, so that is my time. In conclusion, I highly suggest that you guys check out the docs; you can find them at `stellar.org/developers`. And if you have any questions, then please join the Stellar Discord. There is a developer channel there, there's also a help channel there, and we are always ready to answer any of the questions that you have along your Stellar development journey. Once again, my name is Julian Martinez, Senior Developer Advocate here at SDF. Again, thank you guys so much for being with me and weathering the storm.
+
+[59:00] It's been a great hour of diving into dApp development. I hope you guys learned something, and until next time, take care. Bye.
+
+
diff --git a/meetings/2024-08-01.mdx b/meetings/2024-08-01.mdx new file mode 100644 index 0000000000..01dbf895cd --- /dev/null +++ b/meetings/2024-08-01.mdx @@ -0,0 +1,34 @@ +--- +title: "Freighter Swaps via Soroswap" +description: "An overview of Freighter’s swap flow moving to Soroswap, including how the integration works, what developers need to implement, and how Soroswap’s Router SDK optimizes routes across Soroban DEXs." +authors: + - esteban-iglesias + - francisco-catrileo + - naman-kumar + - piyal-basu +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discordapp.com/channels/897514728459468821/1267624969643229275) + +Recent updates introduce a revised execution flow within Freighter that changes how certain operations are handled under the hood. The discussion focused on what has changed, how applications should adapt their integrations, and what developer tooling is available to support the updated approach. Overall, the aim is to provide a clearer and more maintainable integration path as the platform evolves. + +### Key Topics + +- Freighter swaps are now served through `Soroswap` instead of the Stellar DEX +- Developer integration guidance for the new swap path +- Palta Labs’ DEX aggregation work and its availability for general use +- Soroswap Router SDK: + - Finds the optimal swap route by cost + - Routes across multiple DEXs on Soroban via a single SDK interface + +### Resources + +- [Soroswap](https://soroswap.finance) +- [Freighter Soroswap integration instructions](https://github.com/stellar/freighter/blob/d248f2ad0aa03da72ea6eeaf7907ac0454fdcc72/extension/INTEGRATING_SOROSWAP.MD?plain=1#L2) +- [Palta Labs](https://paltalabs.io) +- [Router SDK](https://docs.soroswap.finance/03-technical-reference/07-optimal-route/01-soroswap-router-sdk) diff --git a/meetings/2024-08-08.mdx b/meetings/2024-08-08.mdx new file mode 100644 index 0000000000..7e0d1809a6 --- /dev/null +++ b/meetings/2024-08-08.mdx @@ -0,0 +1,55 @@ +--- +title: "Zephyr on Mercury: Simplifying dApp Development" +description: "An overview of Zephyr running on the Mercury indexer, showing how this runtime environment reduces complexity for dapp developers through higher-level abstractions and streamlined data access." +authors: [naman-kumar, tommaso-de-ponti] +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discordapp.com/channels/897514728459468821/1270154917432131686) + +The session introduces Zephyr as a runtime environment built on top of the Mercury indexer, with a focus on how it can simplify common patterns in decentralized application development. Rather than interacting directly with lower-level indexing and data-fetching logic, developers can rely on Zephyr to provide more ergonomic access to indexed data. + +Through examples and discussion, the talk highlights how pairing Zephyr with Mercury can reduce boilerplate, abstract away indexing and data-access complexity, speed up iteration, and make it easier to reason about application state when building on Stellar’s ecosystem. 
+ +### Key Topics + +- What Zephyr is: + - Mercury’s cloud execution environment powered by the `ZephyrVM` + - A hosted, serverless runtime for custom indexing and data workflows +- When to use Zephyr vs predefined Mercury indexing: + - Zephyr for fully custom APIs, aggregations, bots, alerts, and simulations + - Built-in Mercury patterns for simpler event or contract indexing +- Zephyr execution model: + - Compiled Rust programs running in a sandboxed WebAssembly VM + - Direct access to Stellar ledger data and ledger close metadata + - No need to run or maintain Stellar nodes +- Capabilities enabled by Zephyr: + - Custom database schemas and queries + - Multi-step data processing and aggregations + - External web requests and integrations + - Contract calls and simulation + - Serverless, callable APIs (custom RPC-like endpoints) +- Mercury Cloud vs mapping-based indexers: + - Lower-level and more granular data access + - Ability to tap into external services + - More flexible than handler-style workflows +- Soroban integration: + - Built-in Soroban host support inside the ZephyrVM + - Use of Soroban SDK primitives directly from Zephyr programs + - Easier handling of Soroban contract data structures +- ZephyrVM details: + - Sandboxed execution of untrusted code + - Specialized tooling to simplify indexer development + - Designed to support indexing, monitoring, bots, and alerts +- Extensibility and roadmap: + - Ongoing development with community-driven feature requests + - Issue tracking via the Zephyr toolkit repository + +### Resources + +- [Mercury’s Zephyr Documentation](https://docs.mercurydata.app/zephyr-full-customization/introduction) +- [Project GitHub](https://github.com/xycloo) diff --git a/meetings/2024-08-15.mdx b/meetings/2024-08-15.mdx new file mode 100644 index 0000000000..d5cc1f333b --- /dev/null +++ b/meetings/2024-08-15.mdx @@ -0,0 +1,40 @@ +--- +title: "Decentralizing Stablecoins with Orbit Protocol" +description: "A discussion on Orbit Protocol’s approach to decentralized, multi-currency stablecoins built on Blend, including peg maintenance, smart wallets, and a path toward an orderbook-less Forex perpetual exchange on Stellar." +authors: [ishan-singh, julian-martinez, robin-olthuis] +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discordapp.com/channels/897514728459468821/1272997152452247663) + +Speakers from Orbit outlined how the Orbit Protocol leverages Blend to issue decentralized stablecoins across many currencies, using collateralized debt positions and a decentralized pegkeeper to maintain price stability. The goal is to enable issuers to keep reserves in local markets while minting on-chain stable assets, significantly reducing FX and remittance costs and expanding access to financial tools. + +The session also introduced Hermes, Orbit’s liquidity and control layer, and explored how smart wallets and stablecoin primitives can be combined to power an orderbook-less perpetual exchange. Together, the Orbit + Hermes stack aims to bring decentralized Forex trading, yield, and improved UX to Stellar, while highlighting the design tradeoffs around liquidity, peg management, and protocol complexity. 
### Key Topics
+
+- [`@Soiled`](https://discord.com/users/603296640568393746) and [`@Robeart`](https://discord.com/users/226635735350902785) from Orbit spoke about using Blend to create decentralized stablecoins for all currencies under the Orbit Protocol, utilizing a decentralized pegkeeper to maintain their price, and leveraging these stablecoins and smart wallets to create an orderbook-less perpetual exchange, bringing forex to Stellar.
+- Discussion lays out Orbit’s Blend-native protocol for collateralized debt positions denominated in many currencies and the Hermes liquidity layer, including the approach to keep the peg, use smart wallets, and launch a forex-focused perpetual exchange.
+  - Orbit builds on Blend so issuers can mint stablecoins for many local currencies, keep reserves in their home markets, and reduce FX/remittance costs by up to ~80% while improving financial inclusion.
+  - Hermes manages peg parameters, fee schedules, and treasury routing while ensuring safe withdrawals, balanced deposits, and multi-currency leverage, with DAO-controlled pegkeeper settings.
+  - The combined Orbit+Hermes stack pushes decentralized Forex, on-chain yield, and smart-wallet UX, though it requires careful handling of liquidity, interest-rate tuning, and trustline/setup complexity.
+
+### Resources
+
+- [Orbit presentation](https://docs.google.com/presentation/d/1mDOrBLfe8-Bq6VCy7r5bb4w_uZjq-EOorbV3ZwYfs1k)
+
+:::note
+
+The host’s microphone audio is not in the video, so there is some silence during the Q&A. Here are the questions asked during the Q&A:
+
+1. ([From `@markus_0_`](https://discord.com/channels/897514728459468821/911254664576643122/1273737455413100686)) Why do you always have an infinite amount of tokens in the pool? Wouldn't it be safer to start small and mint more as demand opens up?
+2. ([From HunterIonize](https://discord.com/channels/897514728459468821/911254664576643122/1273740952632299521)) What purpose does this serve exactly? Sorry to be blunt.
+3. How do you see the Orbit Protocol contributing to financial inclusion on a global scale, particularly in underbanked regions? What challenges do you anticipate in achieving this?
+4. In 5-10 years, how do you see the landscape of Forex on blockchain evolving? What role do you believe Stellar will play in this evolution, and how will Blend and Orbit Protocol be at the forefront?
+5. Are there any asks of the developer community?
+
+:::
diff --git a/meetings/2024-08-22.mdx b/meetings/2024-08-22.mdx
new file mode 100644
index 0000000000..5875bbbc5f
--- /dev/null
+++ b/meetings/2024-08-22.mdx
@@ -0,0 +1,46 @@
+---
+title: "Updating Constructors, Wasmi VM, and BLS12-381"
+description: "Core protocol updates covering Soroban constructors, a Wasmi VM upgrade with lazy compilation, and the addition of BLS12-381 cryptography to support advanced use cases like zk applications."
+authors:
+  - dmytro-kozhevin
+  - graydon-hoare
+  - jay-geng
+  - naman-kumar
+tags:
+  - developer
+  - CAP-58
+  - CAP-59
+  - CAP-60
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+## [Public Discussion](https://discord.com/channels/897514728459468821/900374272751591424/1275577430043525204)
+
+Core developers reviewed several in-flight protocol proposals aimed at improving Soroban smart contracts, execution performance, and cryptographic capabilities in Stellar Core. The discussion built on earlier meetings and focused on refinements, performance tradeoffs, and developer feedback.
+ +The session covered how constructors should behave in Soroban, improvements to the Wasmi virtual machine to reduce execution costs, and the introduction of pairing-friendly cryptography to unlock new application classes while staying within network resource limits. + +### Key Topics + +1. The proposal to add a constructor to the Rust Soroban variant was introduced in a previous protocol meeting ([prior meeting](./2024-07-25.mdx)), documented in CAP-0058. A constructor is a function that only runs the first time a contract is created. +2. In this meeting, Dima discussed updates made since the last meeting: + 1. Default constructor: if a constructor is not explicitly defined, the contract is treated as if it has one. + 2. Return value semantics: if a transaction succeeds, it must return a valid value. + 3. Constructor interaction with custom accounts: custom accounts must be aware of the context they are authorizing. +3. Graydon discussed the Wasmi VM update, documented in CAP-0060. Wasmi translates WebAssembly to an Internal Representation (IR) and then executes it. The update has two impacts: + 1. Translating from WebAssembly to IR takes more time, but executing the resulting IR is efficient. + 2. The update introduces lazy compilation. Of all the functions in a contract, only those called in a given transaction are translated, reducing latency and fees. +4. Jay discussed adding the BLS12-381 cryptographic curve, documented in CAP-0059. + 1. Pairing-friendly curves enable zk applications. Eleven host functions were added to expose mapping, pairing, and arithmetic on the BLS12-381 curve. + 2. An example BLS signature verification consumed 26M instructions (running natively), which is promising given the per-transaction limit is 100M. + 3. There was general agreement that the interface is correct, and the discussion continues in Discord. + 4. Jay asked developers to build against the function and share feedback. + +### Resources + +- [CAP-0058: Soroban Constructors](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0058.md) +- [CAP-0059: BLS12-381 Cryptography](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0059.md) +- [CAP-0060: Wasmi VM Update](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0060.md) diff --git a/meetings/2024-08-29.mdx b/meetings/2024-08-29.mdx new file mode 100644 index 0000000000..66edee0be7 --- /dev/null +++ b/meetings/2024-08-29.mdx @@ -0,0 +1,43 @@ +--- +title: "Reviewing Constructors, BLS12-381, and VM Performance" +description: "A core CAP review covering Soroban constructors, BLS12-381 cryptography, and VM performance improvements, with a focus on compatibility, validation tradeoffs, and incentives for contract optimization." +authors: + - david-mazieres + - dmytro-kozhevin + - graydon-hoare + - jay-geng + - naman-kumar + - nicolas-barry +tags: + - developer + - CAP-58 + - CAP-59 + - CAP-60 +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/900374272751591424/1278045556211716171) + +The core CAP team reviewed three proposals advancing Soroban’s developer model and execution performance. The discussion focused on ensuring backward compatibility for constructors, clarifying cryptographic guarantees for new curve support, and validating the real-world impact of VM optimizations. + +### Key Topics + +1. Add a constructor to the Rust Soroban variant. + 1. 
The team was concerned about a potential compatibility break, which Dima addressed.
+   2. There were no further concerns.
+2. Add the BLS12-381 curve and required field arithmetic.
+   1. The team was concerned about providing functions to verify invalid inputs. It is too computationally expensive to validate in the contract layer, so this may need to be implemented as a host function. Jay is looking for ecosystem feedback on use cases that require strict input validation.
+   2. There were no further concerns.
+3. Improve Soroban VM performance.
+   1. The team commented on the accuracy of the measurement method, but the demonstrated wall-clock time benefits were considered promising.
+   2. It was suggested to expose the performance improvements to contract developers, creating an incentive to optimize contracts and take advantage of the improvements.
+   3. There were no further concerns.
+
+### Resources
+
+- [CAP-0058: Soroban Constructors](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0058.md)
+- [CAP-0059: BLS12-381 Cryptography](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0059.md)
+- [CAP-0060: Soroban VM Performance Improvements](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0060.md)
diff --git a/meetings/2024-09-05.mdx b/meetings/2024-09-05.mdx
new file mode 100644
index 0000000000..8466d1e5d7
--- /dev/null
+++ b/meetings/2024-09-05.mdx
@@ -0,0 +1,40 @@
+---
+title: "Galexie and the Composable Data Platform"
+description: "A walkthrough of Galexie and the Composable Data Platform, showing how Stellar ledger data can be extracted, compressed, stored, and transformed through flexible, pluggable data pipelines."
+authors:
+  - chris-anatalio
+  - shawn-reuland
+  - simon-chow
+tags: [developer]
+---
+
+import DriveVideo from "@site/src/components/DriveVideo";
+
+
+
+## [Public Discussion](https://discord.com/channels/897514728459468821/900374272751591424/1280678171053789317)
+
+The Platform team presented Galexie as a core component of the Composable Data Platform (CDP), focusing on how developers can build flexible, end-to-end data pipelines on top of Stellar ledger data. The demo highlighted how raw network data can be efficiently extracted and prepared for downstream analytics or application use.
+
+The discussion emphasized composability: teams can mix and match storage backends, transformation steps, and execution modes (batch or streaming) to suit different workloads, from historical analysis to near-real-time data feeds.
+
+### Key Topics
+
+1. Galexie
+   1. Data Extraction: Extracts raw ledger data from the Stellar network
+   2. Compression: Compresses raw data for efficient storage
+   3. Storage Options: Supports runtime configuration through the Datastore abstraction to use various physical storage layers, starting with Google Cloud Storage (GCS)
+   4. Operating Modes: Can run in batch mode or streaming mode
+2. Composable Data Platform
+   1. Flexible Datastore: Multiple options for physical data storage layers
+   2. Galexie: Used to extract, compress and export data to your chosen Datastore
+   3. Transform: Structure data in a model suitable to your application
+3. Pluggable Data Pipelines
+   1. Workflows: Create ETL (extract, transform, load) pipelines
+   2.
Streaming: Fast, lightweight streaming data + +### Resources + +- [Galexie repository](https://github.com/stellar/stellar-galexie) +- [Galexie docs](/docs/data/indexers/build-your-own/galexie) +- [CDP blog post](https://stellar.org/blog/developers/composable-data-platform) diff --git a/meetings/2024-09-12.mdx b/meetings/2024-09-12.mdx new file mode 100644 index 0000000000..becb667b80 --- /dev/null +++ b/meetings/2024-09-12.mdx @@ -0,0 +1,36 @@ +--- +title: "CLI Aliases and Ledger Snapshots" +description: "An overview of new Stellar CLI features that improve developer workflow, focusing on contract aliases for easier invocation and ledger snapshots for repeatable local testing." +authors: [carsten-jacobsen, david-mazieres, nando-vieira] +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/900374272751591424/1282934024892973077) + +The Dev Experience team presented recent Stellar CLI enhancements aimed at simplifying smart contract development and testing. The session focused on reducing friction when working with contract IDs and improving reproducibility in local test environments. + +Through live demos, Nando showed how aliases can replace long contract IDs in day-to-day CLI usage, and how ledger snapshots enable developers to capture and reuse a specific ledger state when writing and running tests. + +### Key Topics + +- CLI contract ID aliases: + - Installed the Hello World example to show aliases. + - Showed examples of how contract IDs are often passed as CLI parameters, such as in `invoke` (copying the ID string or using command substitution). + - Demonstrated how to deploy a smart contract and create an alias. + - Demonstrated how to invoke a smart contract using the alias. +- Ledger snapshots: + - Creating a snapshot of a specific ledger state + - Using snapshots in test cases for deterministic behavior + - Improving repeatability and reliability of local contract tests +- Documentation updates: + - New command-line examples tailored for Windows users + - Introduction of a CLI recipe book with common workflows + +### Resources + +- [Stellar CLI Cookbook](/docs/tools/cli/cookbook) +- [Stellar CLI Manual](/docs/tools/cli/stellar-cli) diff --git a/meetings/2024-09-19.mdx b/meetings/2024-09-19.mdx new file mode 100644 index 0000000000..78c7eccf8c --- /dev/null +++ b/meetings/2024-09-19.mdx @@ -0,0 +1,32 @@ +--- +title: "Hello World dApp with Soroban and Next.js" +description: "A step-by-step walkthrough of building a simple Soroban-powered Hello World dapp, covering smart contract setup, TypeScript bindings, and a Next.js frontend using the JavaScript SDKs." +authors: [carsten-jacobsen, elliot-voris] +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/900374272751591424/1285627254130610297) + +This session demonstrates how to build an end-to-end Hello World decentralized application using a Soroban smart contract and a Next.js frontend. The focus is on the full developer workflow: creating the contract, generating client bindings, and wiring everything together in a modern web app. + +Carsten walks through how the Stellar CLI and JavaScript tooling simplify integration, showing how a frontend can invoke a Soroban contract, submit user input, and display on-chain responses with minimal boilerplate. + +### Key Topics + +1. 
Create the default Hello World smart contract using the Stellar CLI. +2. Create TypeScript bindings (package) using the Stellar CLI. +3. Create the default Next.js project using `npx create-next-app`. +4. Add and link the TypeScript bindings package to the Next.js project. +5. Build a simple frontend with a form to submit a string. +6. Import the package in the Next.js page and configure a client. +7. Create the submit function to send the form value to the smart contract. +8. Use `useState` to store the smart contract response and display it. + +### Resources + +- [Install the Stellar CLI](/docs/build/smart-contracts/getting-started/setup#install-the-stellar-cli) +- [Stellar CLI Manual](/docs/tools/cli/stellar-cli) diff --git a/meetings/2024-09-26.mdx b/meetings/2024-09-26.mdx new file mode 100644 index 0000000000..0ec3e16fc3 --- /dev/null +++ b/meetings/2024-09-26.mdx @@ -0,0 +1,33 @@ +--- +title: "Hoops Finance DeFi Platform Overview" +description: "An overview of Hoops Finance and how the platform simplifies DeFi participation on Stellar through guided workflows, AMM abstractions, and public APIs." +authors: + - bastian-koh + - chris-anatalio + - elliot-voris + - timothy-baker +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/900374272751591424/1288890126038208532) + +Hoops Finance presented an overview of the DeFi platform they are building, with a focus on making decentralized finance more accessible to non-expert users. The discussion centered on how the protocol abstracts technical complexity while still exposing powerful functionality on Stellar. + +The team walked through their approach to guided user experiences, liquidity provisioning, and automated market maker interactions, highlighting how developers and end users can interact with AMMs through simplified interfaces and public APIs. + +### Key Topics + +- Abstraction of DeFi investment complexity through guided, step-by-step user prompts +- Simplified liquidity provisioning that hides underlying LP mechanics from end users +- Public AMM API supporting both read and write access to AMM data on Stellar +- Developer-facing tools designed to enable easier integration with Soroban + +### Resources + +- [Hoops Finance](https://www.hoops.finance) +- [Hoops Finance GitHub](https://github.com/Hoops-Finance) +- [Hoops Finance API Documentation](https://api.hoops.finance) diff --git a/meetings/2024-10-24.mdx b/meetings/2024-10-24.mdx new file mode 100644 index 0000000000..020aeb205a --- /dev/null +++ b/meetings/2024-10-24.mdx @@ -0,0 +1,40 @@ +--- +title: "CDP Hackathon Project Demos" +description: "This overview highlights protocol updates, tooling demos, and ecosystem discussions." +authors: + - amisha-singla + - carsten-jacobsen + - george-kudrayvtsev + - nicole-adair + - shawn-reuland + - simon-chow + - sydney-wiseman + - tamir-sen +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/channels/897514728459468821/900374272751591424/1298362698123182080) + +The SDF Platform team held an internal hackathon last week to build applications that use the Composable Data Platform (CDP). In this week's developer meeting, team members presented their projects. The apps varied widely (trading app, fraud detection, JS browser-based ingestor, etc.), with the goal of showing how easy it is to use CDP. 
+ +### Key Topics + +- Trading aggregation service built on CDP data streams +- Fraud detection using CDP (`Deceptiscan`) +- Multiple CDP “hackies,” including: + - General data indexers and payments indexers + - Smart contract expiration alerts + - Data indexes stored in DuckDB + - Torrent-based distribution for data indexes + - Ledger metadata extraction +- JavaScript, browser-based data ingestion frontend +- Real-time analytics pipelines and dashboards +- Developer discussion on ease of setup, flexibility, and experimentation with CDP + +### Resources + +- [CDP blog post](https://stellar.org/blog/developers/composable-data-platform) diff --git a/meetings/2024-11-14.mdx b/meetings/2024-11-14.mdx new file mode 100644 index 0000000000..83d89b2934 --- /dev/null +++ b/meetings/2024-11-14.mdx @@ -0,0 +1,30 @@ +--- +title: "Stellar Lab Demo" +description: "This overview highlights Soroban RPC, Stellar Lab, and transaction data." +authors: [carsten-jacobsen, jeesun-kim] +tags: [developer] +--- + +import DriveVideo from "@site/src/components/DriveVideo"; + + + +## [Public Discussion](https://discord.com/events/897514728459468821/1304859059425382553/1306725344870400000) + +This session featured a walkthrough of the new Stellar Lab, with a focus on recent improvements aimed at streamlining the developer experience when working with Stellar and Soroban. + +The demo explored how Stellar Lab supports common development workflows, including account setup, transaction creation, inspection, and interaction with Soroban RPC, highlighting both usability improvements and underlying technical capabilities. + +### Key Topics + +1. Activate the MultiSig exercise +2. Stellar Wallets Kit +3. Create and fund an account +4. Save transaction function +5. XDR-to-JSON mapping +6. RPC methods, including `simulateTransaction` + +### Resources + +- [Stellar Lab](https://lab.stellar.org) +- [Documentation page](/docs/tools/lab) diff --git a/meetings/2024-12-05.mdx b/meetings/2024-12-05.mdx new file mode 100644 index 0000000000..0172fe9032 --- /dev/null +++ b/meetings/2024-12-05.mdx @@ -0,0 +1,130 @@ +--- +title: "Trustless Work on Escrow Smart Contracts and AI Development Tools" +description: "A dev meeting spotlight on Trustless Work’s escrow infrastructure on Stellar—open-source, USDC-focused, and designed to abstract contract deployment and interactions—plus a live demo of using AI-assisted IDE tooling to iteratively modify and test a Soroban contract." +authors: [alberto-chaves, carsten-jacobsen] +tags: [spotlight] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This call features a walkthrough of Trustless Work’s escrow model, positioning escrow as programmable infrastructure that can be reused across marketplaces and other platforms without requiring deep blockchain expertise. The discussion emphasizes reducing payment friction (cost, speed, transparency) and enabling flexible release conditions for funds. + +The second segment shows how AI-based developer tools can help new Soroban builders iterate faster: generating code changes from plain-English prompts, updating tests, and using in-IDE explanations to learn concepts like storage choices without constantly switching contexts. + +### Key Topics + +- Trustless Work: Escrow on Stellar Overview + - Trustless Work provides an escrow infrastructure that abstracts smart contract configuration, deployment, and interaction, making escrow workflows easier to integrate and operate. 
- Why escrow fits Stellar rails
+  - Global, permissionless settlement without intermediaries
+  - Lower costs and faster execution than fiat-heavy workflows
+  - Transparent, on-chain execution that supports automation and auditability
+- Product Direction
+  - Primary focus on USDC to reduce volatility friction, while remaining compatible with Stellar-issued assets
+  - A “templates + developer tools” model to simplify escrow setup, customization, and integration
+- Open source and ecosystem focus
+  - Tooling and escrow templates developed in the open to encourage community contributions and integrations
+  - Documentation treated as a first-class deliverable, enabling integrators to move quickly from concept to production flows
+  - Use of AI-assisted tools (such as code review and explanation assistants) to improve clarity, maintainability, and contributor readiness
+- Adoption and integration patterns
+  - Marketplace and freelancer-style payment flows
+  - Early exploration of trade finance use cases, including cross-border counterparties and coordinated settlement
+  - Rental security deposits with configurable dispute and release logic
+  - Crowdfunding-style milestone releases using escrow-like “deposit and release” mechanisms
+- Mainnet Readiness Considerations
+  - Emphasis on third-party audits and broader review for contracts handling user funds
+  - Transitioning from testnet validation to production-grade deployment practices
+- AI-assisted Soroban development demo
+  - Using an AI-enabled IDE to generate Soroban-specific Rust changes through prompts
+  - Storing arguments in contract storage and falling back to stored values when inputs are missing
+  - Updating tests to validate storage-backed behavior
+  - Leveraging AI chat to explain Soroban storage types (instance, persistent, temporary) and when to use each
+- Community Operations
+  - Experimenting with shorter meeting formats to improve engagement
+  - Moving sessions to Twitch to reduce technical friction
+  - Plans to introduce an email notification list for upcoming meetings
+
+### Resources
+
+- [Trustless Work](https://trustlesswork.com)
+- [Trustless Work Documentation](https://docs.trustlesswork.com)
+- [Cursor IDE](https://www.cursor.com)
+
+
Video Transcript
+[00:00] Hello and welcome to this week's Stellar developer meeting, in this new format where we are trying out Twitch, and we're trying shorter presentations. So let's see how it goes; hopefully this will go smoothly, but it is the first time we're trying a new platform, so there might be some smaller hiccups. With me today I have Alberto, whom I've had the pleasure of meeting several times at different SDF events, and he is going to talk about Trustless Work today. So please go ahead and introduce yourself and share your screen, Alberto. Okay, give me a second to present. Hello everybody, my name is Alberto; as he said, I'm from Costa Rica, and I am building Trustless Work. Please
+
+[01:00] confirm if you can see my screen, because I have one monitor, so I'm not sure, but I think you do. Okay, and yeah, we are building Trustless Work, which is escrow infrastructure for the new economy. So, to understand a little bit of what I'm talking about: in very simplistic terms, an escrow is a neutral, secure way to hold money or funds while something is happening. The most common use of escrow that you might know is real estate escrow, where in this case we use an escrow agent, like a bank, to hold the funds while the legal inspections and the paperwork happen for a house. But we also have digital escrows: in the case of eBay, as a marketplace they hold the funds while the
+
+[02:00] merchant actually delivers the product, and same thing for Upwork, which holds the funds while the freelancer completes the deliverable. But if you try to build this today with fiat payments, it'll be next to impossible, and that's because legacy escrows, as I call them, have a huge problem, which is that fiat payments require banking: they are very costly, they're very slow, they're very limited in flexibility, they lack transparency, and they're obviously full of frictions; if you're here watching a Stellar call, you understand the frictions that fiat payments bring. So, not anymore. At Trustless Work we're building the escrow infrastructure: basically, we're abstracting the configuration, deployment, and interaction with smart contracts to create smart escrows, which provide the same functionality as escrows and even more, because they're cheap, they're fast, they're permissionless, transparent, and
+
+[03:00] global. And so, in this way, marketplaces, e-commerce, directories, trade finance, and many more use cases are unlocked that can actually use this functionality without requiring blockchain expertise. What we're building at Trustless Work is also an ecosystem. So we're building, in an open source way, all the escrow tools, the templates, and the dev tools, so that it becomes as simple as possible to initialize, fund, sign, review, approve, and release payments over the Stellar network. We are focused on USDC, which, again, is the one we believe brings the least friction in terms of volatility, but we can already use this with any asset issued on the Stellar network. Some of our traction, and a little bit of the story: we started working on this in July; we got into the Hacker
+
+[04:00] House sponsored by Stellar and Draper University, and that's where we built version one of the smart contracts, the API, and the documentation, which we launched on testnet at a Costa Rican hackathon. We also got accepted to the Stellar Draper U Embark program, which is where I'm at right
now. And we also participated in the OnlyDust ODHack number 10, which allowed us to accelerate our development: we got escrow version two done in an open source way, plus the escrow UI version one. We also recently got awarded an SCF grant, so thank you very much for that. Right now we're building the API version two, because we're preparing for ODHack number 11, where, again, if you want to contribute, you're more than invited to: there's another escrow UI version too, some partner integrations, and there are even some research
+
+[05:00] tasks that we're going to add there. We're getting ready for demo day at Draper U, which is by the end of January, and we are going to be launching on mainnet around Q1 2025, probably around March. I mentioned OnlyDust, and this is something really interesting that is happening with Trustless Work: our hackathon projects, the ones that were in the testnet launch, have evolved into becoming verticals on their own. Each one of these projects took part in ODHack number 10 by themselves; they got contributors by themselves, and they got funds to initialize their development and mature their development. So, directly or indirectly, I think Trustless Work has over 70 developers contributing to the ecosystem, and all of these projects are going to become templates for integrations that are going to make it easier to integrate Trustless Work
+
+[06:00] into all use cases, probably. And incoming we have casino and poker use cases that want to use escrow, and trade finance use cases. One that I want to call out: KindFi was able to build, in a week, a crowdfunding-for-social-impact solution using our escrows. Basically, they use our escrows as a deposit account where people can deposit funds, and the payment is released to the foundation by milestones. So, yeah, we are also getting into AI-driven development, to accelerate how we integrate into common use cases. We are a core team of four. Myself, Alberto: I'm the CEO and product lead, and I have experience as a product manager on projects spanning Corda to
+
+[07:00] Ethereum to XRP and Stellar. And Joel, Armando, and Caleb are the development team; they have been contributing to the Starknet and Ethereum ecosystems, and now they've moved into Stellar as well, because of this project. And we're also backed by a community of over 180 developers from Latin America, which is called Dojo Coding; shout out to them, they're probably going to be here. And yeah, that's what we're doing at Trustless Work. This is a link to my LinkedIn; if you want to connect, I will gladly do so. If you want to contribute, please do. And yeah, we're building, again, the escrow infrastructure for the new economy, and I'm very happy that it's going to happen on Stellar. Great, thank you for the presentation. I think it's very exciting
+
+[08:00] how fast you have really been able to spin this up and already start to get traction. Just a reminder: are you on testnet or are you on mainnet? Right now we're on testnet, yeah. There are a lot of things we have to figure out before we launch on mainnet with real funds; we won't make revenue until we're on mainnet, but we don't want to rush that. We've gotten support from the community, from Draper, and from Stellar Ventures, so we want to take our time with that. So anyone who wants to explore what you're doing can go to your website. I have seen your documentation; it's pretty good.
So I could actually start building on your platform right now if I wanted to? Yes, that's the idea. I've also gotten really into devrel — I'm part of DevRel Uni — so I've been learning a lot about that, and we're working on our docs,

[09:00] making it as simple as possible to go from zero to hello world. Something we talked about previously was open sourcing — can you share a little about your experience with open sourcing your projects? Sure, yeah, it's been crazy. I come from product management, so I'm used to over-communicating things and features to make them clear. But being open source has really been a great exercise in thinking about who's going to come: I don't know them, they have no context on our product, so how can we make their life easier? Gladly, our core team — Joel, Armando, and Caleb — already have experience; they have been on the other side of open source contributions, so they knew the best

[10:00] formats and how to explain things. We're also leveraging AI — I've learned that CodeRabbit really helps to start describing things and to look for improvements. I think about the traction in phases: the hackathon was phase one, where Trustless Work was an API and we got these five teams building hackathon projects, and after OD Hack 10 they're all more mature — each one got around 20 contributors. It's crazy how we were able to start building this ecosystem, and now it's really interesting to connect them — we're building marketplaces to connect them — so they can accelerate on their own and focus on different aspects of it. I'm really excited about the opportunities and the possibilities that

[11:00] this provides to build that ecosystem, and to start mapping contributions. Now we're moving everything to GitHub — even the documentation, all the work I do — because it's hard to track even what I do, and we want to be able to leverage that as much as possible. We're learning a lot, and it's really interesting. That's great. You mentioned real estate in your presentation, because that's probably where most of us know escrows from, but can you talk a little about what kinds of projects are interested in using your service, and what industries they're in? Because I know it's not just real estate companies. Yeah, I've gotten a lot of interest from real estate, that's for sure. I explain that we are an infrastructure provider — a technical partner — we are not building for real estate, but they

[12:00] can obviously leverage us. This unlocks, for example, the ability for real estate agents or even lawyers to deploy their own escrows and not require a banking agent that may take two or three days, with the "where's the wire?" kind of experience, which is the feedback I always get. But even more than that, trade finance is really interesting. I'm from Costa Rica, as I mentioned earlier, and Costa Rica was just invited to join a trade union with 14 countries in the Pacific. So it starts becoming really interesting: how are we going to handle payments with new countries, new economies that we don't have a lot of financial relationships with — Malaysia, Singapore, Peru, those kinds of countries?
So trade finance is something that's really interesting for us right now. But also, for example, one of the teams built on our escrows to create a security deposit solution for

[13:00] house rentals. If you think about it, every time you put down a security deposit — in hospitality, car rentals, house rentals — it's always the card processor: you never know where the money is, it's always blocked for a certain amount of time, and nobody's responsible for it. They could build this because escrows are configurable in that sense: they created an escrow that is released back to me unless there is a dispute. That's security deposits — and if you look for security deposit technology, there's not much out there. So it becomes really interesting to start experimenting with this and seeing, for example, if we do a plugin for, I don't know, Cloudbeds, or integrate this into Shopify — different types of integrations that are becoming really interesting. And again, we can only take those on by being open source and getting as many contributions as possible to get the bandwidth, because with

[14:00] internal resources alone it would be impossible. Yeah, and it sounds like it's not just crypto companies or crypto projects that are interested in using your service — actually, maybe the opposite. Yeah, and that's also one of the main reasons we chose Stellar: the anchor network and the on-ramp and off-ramp partners. We want users to have a good experience — to be able to send an ACH transfer or a wire; it depends on the use case, even credit card or card processing. So there are all these partners that we need to explore and start integrating with, or make it as simple as possible to integrate for the platforms that are going to use our technology. We're really excited about that.

[15:00] What else? Mostly that crowdfunding is becoming really interesting, also for aid: if we make it very simple to deploy this kind of escrow to receive payments globally, it's really interesting for aid as well. There are a lot of opportunities to abstract away the blockchain. Yesterday I was pitching, and I said we wanted to become the Stripe of escrows — which is ambitious for me — and somebody corrected me and said, no, let's be the Salesforce of escrows. So that's the kind of infrastructure we want to provide, and I think Stellar is perfect for that. I think we are in a great moment for it — stablecoin adoption is booming — and hopefully we're right on time. Yeah, it's amazing to see what you're building and how fast

[16:00] you're doing it — it's a huge inspiration, also that you really embrace open source. You've really shown that by embracing open source and putting it out there, you get a lot of people playing around with it. So yeah, that's great to see as well. Yeah, and the verticals can also add their own fees: we have a fixed fee for Trustless Work on escrows, which is 3%, and they can add an extra fee on top. So if they become the security-deposits product on top of us, there is a real business model there. That's why we're taking such a small, tiny cut:
because we want to provide that layer zero of infrastructure, so that all their products can come and build upon us and offer this functionality. People have experimented with this, but it's mostly untested in a lot of niche use cases

[17:00] and verticals — every time I talk to someone, I get ten ideas of things we can build. And open source is honestly the only way. Do you have any idea when you will actually go live on mainnet? According to our Stellar Community Fund grant, around March or April is what we're targeting. Version two of the contract is already done, the API goes after that, and we have that whole flow, so I think it's really feasible by March. Yeah, and that's quick — it's only three or four months away. Part of the Community Fund grant also includes an audit, so I'm looking forward to that, because it would really help. And

[18:00] that's another reason open source makes a lot of sense: as many eyes as possible on our contracts and so on, so we get more feedback, which is one of the best validation techniques, I think. Yeah — when you're holding other people's funds, the auditing part is super critical: having a third party verify your code, go through your code, and be able to say that our code and our solution have been audited by a third party. I think it also builds a lot of trust. So I think it's a great thing that we have that as a part of our Community Fund program. Yeah, it's really useful — looking forward to that. Okay, great, thank you so much. It's always a pleasure to talk to you, and it's super exciting to see

[19:00] what you're making and what you're building on Stellar — I think it's an inspiration for everyone in the community. Thank you for joining. Sure, my pleasure — talk to you later, bye. Okay, then, as a part of the new format we're trying out here, we'll try to do the presentations a little shorter and have maybe two or three presentations per session. This time we don't have a second presentation; instead, I am going to experiment a little with some live coding. I have been testing out some AI tools that can assist you in building smart contracts on Stellar, and I think it's interesting from the perspective that, if you're new to building smart contracts in Soroban or on Stellar, some of these tools

[20:00] can be a great help. So let's see if I can share my screen — I think it's loading here; let's see if I can make it a little bigger. Okay. What I'm going to show here is just the hello world application — the demo example app we have — and how to run it. This is inside Cursor, an AI-assisted IDE you can use for many different languages. Just for the fun of it, I tested it

[21:00] out not that long ago, because I was pretty sure there was probably no support for Soroban — but to my surprise, the support was actually pretty good. So I'm going to go through a very simple example of how you can use it, and I think this might be very helpful if you're new to developing on Stellar and Soroban. It looks very much like other tools you're used to using — the IDE is pretty standard looking — and I have a terminal, so I will just try to set up the hello world smart contract. So let's try that.
So let's try, that and let's call + +[22:00] this okay. So what we have here is a new smart contract it's the hello world smart contract and we can try to open the code here and this is very much like what you're used to seeing in your IDE and we can do the same we can in other IDs. So let's just try to run this let's run a test oh I need to get into the right directory and it just takes a minute. And then it should hopefully run the test without any issues. While this is running the what I want to show here is I'm going to take this very simple smart contract. And then I'm going to add some function it to it what I want to + +[23:00] do is I want to store the value to, that is provided by the caller the invoker and. If there is already a value stored and we are not providing a value. When we invoke the smart contract so. If two is empty. Then I'll just take the last value, that's a very simple smart contract. But but it actually shows a pretty a good example of what you're doing like. When you're messing around with code for the first time or. When you're. If you're very new to it. So let's try, that out. So first I want to store I can see it is already remembered what I've done before, that's it's not supposed to do, that. But what I do here in the IDE is, that I press command and K I'm using Macbook. So it's command. And then I + +[24:00] write let's see let's say store to in storage. So I just type what I want to do. And then I click the generate button. And then it will make a suggestion and I can see, that this is not just some generic rust code this is actually soran code and I think this looks pretty good. So let's just accept, that. So so. Now we have a line here, that was generated by the AI, that stores the value from the string from two. When it's provided to our instant storage I can. Then say Okay I only want to use this storage + +[25:00] if. If the two value here. When we invoke the smart contract. If there's no value here. Then I want to use the stored value from the first time we ran it. So so let's try to have it do, that again I press command and okay. And then let's just say and again I just describe it. If if two is empty use two from storage and let's see what it comes up with okay. So it actually initializes a new in new variable and it checks. If two is has a length, that's equal zero and. If it is. Then we get it from + +[26:00] storage and. If not. Then we just return the one we got from the parameter from an invoking the smart contract this looks pretty good. So so let's just do, that and having this line of code here is actually fine. Because this. If if it's valid. Then we want to update the value in storage and. If it's just getting the same from before. If if two was empty. Then it's just restoring the same value so, that's, that's fine too. So so let's try to run this I'm not going to build and deploy it to test net I'm just going to use the test case and. If we go to test first I just want to store this so. If we see the test case here, that is provided it comes out of the box. When we create the smart contract + +[27:00] then. Then we provide Dev as a string. When we invoke the contract. And then we check for it. If it contains Dev but. Because we also want to test for the case where there's not a string When we invoke it let's create the test case for, that as well oh and let's like make this mutable. And then we just add an empty string here so. When we run this. 
When we run this, because we ran the first test — where we stored the "Dev" string in storage — we should still get "Dev", even though we don't provide a string here. So let's try to run this

[28:00] test. There is an error. If we go back to the code here, this way of setting the output type is probably what's throwing the error, so let's specify that this is a String and see if we can run it again. Okay, it now completed. That means both test cases passed, and the whole test file completed without errors: when we provide "Dev" as a parameter the first time we invoke the contract, we store it in storage, and the second time, when we run it without the value, it

[29:00] still gets "Dev", because it's pulling it from storage. Now, I know a little more about storage in Soroban, so I know that this is instance storage, and that there are three different types of storage: there's also temporary storage and persistent storage. So is this the right one to use? There's a neat little feature I can use here: I can actually ask the IDE whether this is the right storage to use. I press Command and L, delete these, and then I can ask: "which storage type should I

[30:00] use?" Let's see what I get. Hmm — this is actually not a relevant example; it's more of a front-end answer. Let's try to rephrase: "is instance the right type?" It seems like it's looking at — this is typical when you do something

[31:00] live — let's try "explain soroban storage types" and see. Yeah, this is a little more along the lines of what it returned when I was testing it out. I can ask right in the IDE what the different storage types are, and it mentions instance storage, persistent storage, and temporary storage; then it gives me an example of how to use them, and some more information. When I tried this earlier, it actually replied that, for the functionality I'm building in this smart contract, I had picked the right storage type — that instance storage would be the

[32:00] best fit. That's what I was hoping it would show again. But instead of having to go to the documentation or other places to learn more, we can just use the chat and get some valuable feedback — at least some of the time. So this was a quick example of how AI is going to help us build smart contracts as well. I was really pleasantly surprised when I saw that it knows how to assist me when I'm building smart contracts, and I think it's really relevant, especially for developers who are new to Stellar and new to Soroban, that they can get a little help without leaving the IDE. I hope this was helpful, and maybe you feel like trying it yourself. The tool I tried is Cursor, and I'm using the free version —

[33:00] it's not an endorsement or anything, it's just the one I happened to try out — but play around with it: I think it's great fun, and eventually I think it'll help me be more productive when I'm coding.
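For reference, the contract assembled over the course of this demo ends up close to the following minimal sketch. It starts from the standard hello-world example; the storage key name and the exact fallback logic are illustrative, not the verbatim AI-generated code:

```rust
#![no_std]
use soroban_sdk::{contract, contractimpl, symbol_short, vec, Env, String, Symbol, Vec};

// Illustrative instance-storage key for the last provided `to` value.
const LAST_TO: Symbol = symbol_short!("LAST_TO");

#[contract]
pub struct HelloContract;

#[contractimpl]
impl HelloContract {
    pub fn hello(env: Env, to: String) -> Vec<String> {
        // If the caller passed an empty string, fall back to the value
        // stored by a previous invocation (or an empty string if none).
        let to = if to.len() == 0 {
            env.storage()
                .instance()
                .get(&LAST_TO)
                .unwrap_or_else(|| String::from_str(&env, ""))
        } else {
            to
        };
        // Store (or re-store) the value for future invocations.
        env.storage().instance().set(&LAST_TO, &to);
        vec![&env, String::from_str(&env, "Hello"), to]
    }
}
```

With this shape, invoking `hello` with `"Dev"` and then with `""` returns `["Hello", "Dev"]` both times, which is exactly what the two test cases in the demo assert.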
Okay, that's all I had for today. I don't know if there are any questions — we can take questions if there's anything. I see Chris has been very active answering questions and commenting as well.

[34:00] Yeah, it's an interesting discussion. Okay, if there's nothing else, then thank you everyone for joining. I hope you like this format; we will continue to experiment and try different things, and it looks like Twitch is giving us a lot fewer technical issues than we've had in the past when we've done these meetings on Discord. So we'll continue here. Shortly we will send out — and I'll share this on Discord as well — a sign-up page where you can sign up for notifications, so that when

[35:00] we plan a new session and go live, you can get an email notification. That was one of the suggestions we got from the session where we talked about some of our plans, what we would like to do, and asked for feedback; we are working on setting up an email list, so you can subscribe and get notifications when we have a new planned meeting. So thank you everyone for joining — we'll see you in a week. Bye.
diff --git a/meetings/2024-12-12.mdx b/meetings/2024-12-12.mdx new file mode 100644 index 0000000000..fb9a8d993b --- /dev/null +++ b/meetings/2024-12-12.mdx @@ -0,0 +1,96 @@ +--- +title: "Smart Contract Wallets Passkeys and Soroban Limits" +description: "This overview highlights Soroban smart contracts, Soroban RPC, and Stellar Core." +authors: [carsten-jacobsen] +tags: [developer] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +In this meeting, the focus was on improving the Stellar developer and user experience through smart contract wallets powered by Passkeys, followed by a discussion on scaling Soroban through higher protocol limits. The first half explored how Passkeys simplify onboarding, authentication, and transaction signing for smart contract wallets, reducing reliance on seed phrases and passwords. + +The second half covered ecosystem-driven proposals to raise Soroban transaction and ledger limits. These changes are aimed at enabling more complex DeFi use cases while maintaining network safety, with benchmarking work in Stellar Core used to validate the proposed increases. + +### Key Topics + +- Overview of Passkeys and why they improve UX and security compared to passwords and seed phrases +- Smart contract wallets on Stellar using Passkeys for authentication and signing +- Passkey Kit TypeScript library for creating and managing Soroban smart wallets +- Client-side and server-side architecture using Passkey Kit and Passkey Server +- Creating new smart contract wallets and signing into existing ones with biometrics +- Invoking Soroban smart contracts through Passkey-based smart wallets +- Adding additional signers, including risks of unrestricted signers +- Policy signers for fine-grained, contract-scoped permissions +- Soroban transaction and ledger entry limits and why they constrain current DeFi protocols +- Proposed increases to read/write ledger entry limits at transaction and ledger levels +- Benchmarking methodology in Stellar Core to validate higher Soroban limits +- Increasing the maximum Soroban transaction event size and considerations for downstream consumers +- Introduction to the Soroban Limits Proposal (SLP) process and its future direction + +### Resources + +- [Passkey Kit GitHub Repository](https://github.com/kalepail/passkey-kit) +- [Passkey Kit Stellar Reference Implementation](https://github.com/anataliocs/passkeys-kit-stellar-ref-impl) +- [Soroban Limits Proposal (SLP-1)](https://github.com/stellar/stellar-protocol/blob/master/limits/slp-0001.md) +- [CAP-0046: Soroban Smart Contracts](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0046.md) +
+ Video Transcript
+
[00:00] Okay — welcome! I think we finally got the issues sorted out, so welcome to this week's Stellar Developer Meeting. Today we have two presentations: Chris is going to talk about passkeys, and Siddharth is going to talk about increasing Soroban limits. So I'll just let you take it from here, Chris — do you need to share your screen? Yeah, let me do that. Okay, cool — is that coming through? Yeah, I think so. Right, okay — let me check the comments — yes, okay, cool, thanks Elliot. Cool. So thanks everyone, thanks for bearing with us through some technical difficulties here, but let's go ahead and get started. This

[01:00] presentation is a subset of a presentation I gave at Devcon in Bangkok, at several events including Multichain Day and Funding the Commons, which is a public goods organization, which is really cool. Basically, we're talking about passkeys — what I would call a revolution in user experience for web3 onboarding. So let's go ahead and jump in: smart contract wallets and passkeys on Stellar. My name is Chris Anatalio, I'm a developer advocate at the Stellar Development Foundation, and let's dive in. We'll give you a quick intro to the what and why of passkeys, then we'll dive into this really cool project that Tyler built, which is basically a

[02:00] TypeScript library that helps abstract away the complexity of integrating passkeys into your dApp. Then we'll go through a reference application where we actually implement this Passkey Kit and demo the functionality it exposes to you. So first off: why passkeys? Well, everyone knows passwords are terrible, but they've reached such a level of terribleness that it's hard to ignore. Between hacks and leaks of password databases, and requirements that users find very hard to follow, most users are not following operational-security best practices, just because it's difficult. So: passkeys to the rescue. Passkeys are

[03:00] faster, easier, and safer. They get rid of having to manage all these different passwords and having insecure one-time passwords sent to you via SMS. Passkeys are enabled by what's called a secure enclave — on iPhone devices, I believe — basically a special chip that can do secure cryptographic operations. So your device acts almost like a YubiKey — not exactly, but basically your device acts as your source of authentication, and you're able to use something like your fingerprint or Face ID to register and sign in to accounts, as you see happening here. You can either use your device, or you can use something

[04:00] like a password manager to manage your passkeys. But basically: no more 13-character passwords and jumping through all those hoops. And ultimately it is safer, because your credential isn't stored somewhere in a database that a hacker could break into. The Passkey Kit is this really great TypeScript library that you can pull into your project — we recommend you use pnpm for dependency management.
It's a great library — you can find it on GitHub under kalepail/passkey-kit — and it gives you all the functionality you need to create and manage Stellar smart wallets. Like I said, you just add it as a dependency to your project. If you scan that QR code, it'll bring you to the repo if you want to check it out,

[05:00] but it's all on GitHub — you can just Google it as well. So definitely check it out; it's really cool, but we'll go through it here. The Passkey Kit allows you to implement signers and various interactions with your wallet, plus really cool things like policy signers, which allow you to grant explicit permissions to a signer. It functions as both a client-side and server-side library, with Launchtube on the server side helping to abstract away the complexity of submitting transactions to the network. So the client lib is Passkey Kit, the server lib is Passkey Server, and you can check out Super Peach as another reference implementation.

[06:00] If we want to look at Passkey Kit a little more closely: if you jump into the Passkey Kit repo and go to the kit source file, you can see the nuts and bolts of how it works. You pass in your RPC URL and the static network passphrase, and it sets everything up for you. It then exposes functions like createWallet that allow you to perform all this functionality with basically just a function call. This is the Passkey Kit client-side code: in your application, when you initialize Passkey Kit, you import it and create an instance of it, passing in the RPC URL of the public node you're using. As a Stellar

[07:00] developer, you know each network — mainnet, testnet, futurenet — has a static network passphrase that you pass in, for instance "Test SDF Network ; September 2015". And then the deployed factory contract address, which is the factory that generates your smart contract wallet — that will have to be deployed beforehand on your selected network. Taking a quick look at the Passkey Server: if you dig into the server source file, it's basically the same thing. You pass in a Launchtube URL, depending on whether you're on mainnet or testnet, and then you also need a JWT

[08:00] token — a JSON Web Token — which authenticates you to use Launchtube. We have self-serve for that on testnet, I believe, but for mainnet I think you need to reach out to the team. So when setting up the Passkey Server in your application, you pass in your RPC URL, your Launchtube URL, and your Launchtube JWT token — in this case we're importing them from a .env file, so that stuff will just be sitting in your local .env config file. If you want to follow along and check out the demo app I built, you can scan this QR code, or you can find it at anataliocs/passkeys-kit-stellar-ref-impl — and we'll share that later too, in case you

[09:00] don't catch it here. The use cases we're going to cover are: creating a wallet, signing into an existing wallet, a smart contract invocation, adding an ed25519 signer, and adding a policy signer — plus some of the nuances along the way, if you follow along with the application.
If you want to pull it down and just run it locally, here is the first example. What we're doing here is creating a new wallet, and that wallet actually gets created as a deployed contract on testnet, as you can see here — and within the data of that contract, you see the signer. So you click "create new passkey", and then in the browser you can either use your fingerprint on your MacBook or a

[10:00] password manager, and the factory contract creates that smart contract — you can see everything that's getting created on stellar.expert. Just going through it a couple of times so you see it all: you create the transaction, it gets submitted by Launchtube, the factory contract creates your smart contract wallet, and then it's there for you to use in the future, linked to the passkey on your device. Digging into that code a little: the front end calls this createWallet function — we name the wallet, and then we call the Passkey Kit createWallet function; we configured this object earlier, at the top of the screen, with the RPC URL and all those parameters. So we invoke that

[11:00] createWallet function, we get the assembled transaction back, and then we use the Passkey Kit server to send that transaction. Going through it again: we pass in what the wallet should be named, and we get back all these fields — the key ID, the contract ID — when we call the createWallet function. We print out the transaction we just created, then we use the Passkey Kit server to actually send that transaction, and we log the response. And since all Stellar accounts need to be funded, we fund that wallet as a final step. Also, when we print out the transaction info, you can actually grab the envelope

[12:00] XDR, and if you want to dig into the details of what's happening in that transaction — the nitty-gritty of the actual operations being executed — you can use the Stellar Laboratory at `lab.stellar.org`. In this reference application, that envelope XDR gets printed out in the console for you to grab. When signing into an existing passkeys wallet, you can either use a key ID, or the browser can prompt you. So when you sign into that existing smart wallet — the passkeys wallet you created — you're able to do it just using your fingerprint,

[13:00] for instance. It's a really simple user experience: you log in, and here's the smart contract we created previously that represents your passkey smart wallet. It's way simpler than managing a 12-word passphrase — much lighter, much less friction when onboarding new users. A lot of this functionality is similar: to sign in, very much like creating a wallet, you either type in the key ID or it'll prompt you for either your fingerprint or your password-manager-managed passkey, and then all you do is connect to the passkey you already created. And then you're

[14:00] pretty much good to go — that's basically the whole authentication flow.
Now for invoking a contract: we've created this smart contract passkeys wallet that can actually invoke a deployed contract. We deployed this smart contract called math, and all it does is add two integers: there's a do_math function on the smart contract that we're invoking using our passkey wallet. When we call this invoke function, we sign using our passkey — our fingerprint — creating that transaction, and then Launchtube helps shoot that transaction off to the network, signed by our passkey wallet. I mean, the

[15:00] actual operation is kind of trivial, but the idea behind it is pretty cool: it's the whole account-abstraction flow, where you have a smart wallet that can invoke a smart contract. Digging into the code that's called there: the do_math function gets invoked — we have this contract instance where we're calling the do_math function — we sign it with our passkey, and after it's signed, we use the Passkey Server and Launchtube to shoot the transaction off to the network. Then we dig into the XDR of the response and see the sum of the two numbers we added. It's a trivial operation, but as you can imagine, you can do basically

[16:00] anything in a smart contract function, and this is the nuts and bolts of using a passkeys wallet to sign a transaction and invoke another smart contract operation — which is really amazing. It's such a cool library that Tyler built here; it's so user-friendly and easy to use. Now, let's say you wanted to add another signer to your passkey wallet. That's definitely possible — you can add any classic Stellar account as a signer. What's dangerous there is that you're basically granting them access to do anything on your account, which is dangerous for, I hope, obvious reasons.

[17:00] Obviously, if you want to grant someone access to your account, you want to grant fine-grained access, where you explicitly control what you're giving them. Since we're passing an empty signer-limits map here, we're not really constraining the permissions we're granting: when we sign this transaction and add the signer, we're allowing that signer to do anything on our behalf for that passkey wallet, which is obviously not ideal. What's cool is that we can instead add what's called a policy signer — I'm not sure this is currently on mainnet — and the way I explain it is that there's a proxy contract:

[18:00] you have to go through this proxy contract, which constrains what abilities you're able to exercise on that invocation. So you're granting a very fine-grained set of permissions to the signer, allowing them to perform a specific activity without giving them full access to your account — which is obviously far more ideal than granting a blank check to a signer. For attaching a policy signer, you'll notice the difference here is that we're actually adding an entry to the signer limits, and as you can see, we're passing in the do_math deployed contract hash.
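For reference, the deployed contract being invoked here is about as small as a Soroban contract gets. A minimal sketch — the function name `do_math` comes from the demo, while the `i64` argument type is an assumption:

```rust
#![no_std]
use soroban_sdk::{contract, contractimpl};

#[contract]
pub struct MathContract;

#[contractimpl]
impl MathContract {
    // Trivially adds two integers. The point of the demo isn't the math:
    // it's that the invocation is authorized and signed by a passkey-backed
    // smart wallet instead of a classic keypair.
    pub fn do_math(a: i64, b: i64) -> i64 {
        a + b
    }
}
```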
But then we're also passing in another contract, called do_math_policy, and that policy is the proxy contract that dictates what

[19:00] you're able to do on that do_math contract: the call has to go through that policy first, which constrains what you're able to do with that signer. This is, I would say, revolutionary — I think it's a really cool concept. You're able to delegate very fine-grained permissions to another signer, which I think is very powerful, and it's just a really cool thing that Tyler built here. If you want to check out some more passkey demos: Elliot created this really cool dice-roller game, there's Super Peach, the classic implementation, and there's a little demo built into the Passkey Kit repo itself. And if you need a Launchtube JWT, I think we do self-

[20:00] serve for testnet, but also just reach out to us on Discord. Cool. Okay, great, thank you — that was very interesting. We actually had a similar presentation at Meridian, at the hackathon, and a lot of the teams chose to implement passkeys, which was great to see: they had limited time to get their applications built, a lot of them decided to use passkeys, and it went pretty smoothly. So definitely go check it out — I think it's a huge improvement in user experience when we talk about web3 apps. Okay, great. I will now invite Siddharth up, and he'll talk about

[21:00] increasing Soroban limits. Please go ahead. Yeah, can you hear me? Yeah. Perfect. Okay — I don't have a presentation today, so I'll just be speaking, but Carsten should be dropping a couple of links to some documents that will be helpful. My name is Siddharth, I'm a software engineer on the Core team, and today I'll be going over a proposal from the ecosystem to increase some Soroban limits. For some context: Soroban transactions are subject to various settings and limits that can be updated with a validator vote, similar to a protocol upgrade. There were three separate proposals from the ecosystem to increase limits, and SLP-1 — there's a link in the comments below — is a combination of all three; you can see the proposers in the author section of the SLP. First, I'll go over the ledger entry limits. The read-entry and write-entry limits are being increased at

[22:00] both the transaction level and the ledger level: the maximum number of ledger entries that can be read in a transaction is increasing from 40 to 100, and the maximum that can be written in a transaction is increasing from 25 to 50. These increases will allow DeFi protocols like Hoops Finance, Soroswap, and Blend to scale up and perform more complex transactions that they aren't able to do today under the current limits. To make sure the network can actually handle the increased runtime from these higher limits, we developed a command-line benchmarking tool in Stellar Core that lets us measure ledger apply times under different Soroban settings. The tool generates a worst-case transaction set that uses as much of the available resources as possible, applies those transactions, and records the apply time; the SLP has more information and results from this

[23:00] testing.
Based on the benchmarking, Stellar Core can handle the increased transaction-level limits I mentioned, as well as increasing the ledger-level limits to maintain the current 5x ratio between the transaction-level and ledger-level limits. The last limit being increased is the maximum transaction events size, which is the combined size of the events that can be emitted within a single transaction. The current limit is 8 kilobytes, and it is low enough that some protocols, like Squid Router, run into it when publishing payloads that they don't want to manage off-chain, so the proposal bumps it to 16 kilobytes. Benchmarking this increase in Stellar Core, however, isn't very valuable: Stellar Core just adds the events to the transaction meta and writes it to a pipe, and when I increased the events limit in my benchmarking, I saw no hit to performance in Stellar Core

[24:00] specifically. So it's more relevant to hear from downstream consumers and get their opinion on whether they can handle the increased theoretical load — and I say theoretical because, in practice, we don't see large amounts of data emitted through events at the moment. Our ask here is for feedback on whether any of you expect issues from this increase to the events limit. All of the increased limits from SLP-1 have been applied to testnet, so feel free to try them out. I also wanted to mention that the SLP process is new and still being improved, but the end goal is to allow anyone in the ecosystem to propose new settings and evaluate them using the tooling I mentioned. Currently the tooling is still being worked on, and we don't have a lot of documentation for it yet, but we're hoping that soon all of this will be good enough for someone in the ecosystem to go through the entire proposal and evaluation process without our help.

[25:00] The current SLP proposal format can be found in the README at the stellar-protocol link that was dropped in the comments. That's all I had for today — if there are any questions, let me know. Great, thank you. If there are any questions, please drop them in the comments. This is something we're going to experiment with going forward: when we do upgrades and change limits and things like that, we'll have a quick talk about it here in the weekly developer meetings. Next week we're actually going to talk about Protocol 22 and some of the new features that launched with it. That's what we had for the meeting today — thank you to both our presenters for taking the time to speak here, and I'll see everyone else next Thursday.

[26:00] Thank you.
diff --git a/meetings/2024-12-19.mdx b/meetings/2024-12-19.mdx new file mode 100644 index 0000000000..adaf1cab73 --- /dev/null +++ b/meetings/2024-12-19.mdx @@ -0,0 +1,128 @@ +--- +title: "BLS12-381 Building Blocks and CAP-59 SDK Updates" +description: "This overview highlights SDK updates." +authors: [carsten-jacobsen, jay-geng] +tags: [developer, CAP-59] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +In this meeting, Jay introduced the new BLS12-381 cryptographic building blocks added in Protocol 22 and explained how they enable advanced verification workflows on Soroban. The discussion focused on what the primitives are, why pairings matter, and how these host functions map into SDK interfaces developers can use today. + +Jay also demoed practical examples: verifying aggregated BLS signatures and validating a Groth16 zkSNARK proof on-chain. Along the way, he highlighted key SDK ergonomics (like checked vs unchecked operations and `pairing_check` design), plus current gaps and improvements still being worked on. + +### Key Topics + +- What BLS12-381 provides (G1/G2 groups, pairings) and why it unlocks encrypted/advanced verification patterns +- CAP-59 overview and the set of new host functions introduced for BLS12-381 support +- Hash-to-curve and domain separation tags (DST) for safely mapping messages onto curve points +- Field operations (`Fr`) for scalar arithmetic used by curve operations +- SDK surface area: `bls12_381` module and near 1:1 mapping to host functions +- Difference between unchecked point ops vs checked variants that enforce subgroup membership +- `pairing_check` approach (vectorized checks) to avoid expensive `F12` pairing result materialization +- Demo: aggregated BLS signature verification (multiple signatures verified efficiently via pairings) +- Demo walkthrough: generating a simple Groth16 proof off-chain and verifying it in a Soroban contract +- Expected performance costs (high CPU budget) for signature aggregation and zkSNARK verification +- Clarification on scope: on-chain verification is supported; proving remains off-chain and toolchain-specific +- Notes on current UX gaps (e.g., storing serialized point bytes, point negation conveniences) and planned improvements + +### Resources + +- [CAP-0059: BLS12-381 Host Functions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0059.md) + +
+ Video Transcript
+
[00:00] Hello everyone, and welcome to the last Stellar Developer Meeting this year. On the agenda for today, we have Jay from the core engineering team, who will talk about some new additions in Protocol 22 and then give some demos. So I will invite Jay to join me here on stage. Hello — can you hear me? Yeah, I can hear you. Okay, great. Hi everyone, I'm Jay, a core developer. Today I'm going to walk you through the building blocks that we introduced in Protocol 22, which went live this month, a couple of weeks ago — specifically the cryptographic primitives, namely BLS12-381.

[01:00] The CAP for BLS12-381 is [CAP-59](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0059.md), which was introduced, I would say, three or four months ago. Today I'm going to focus on a high-level overview of what you can do with the different types of functions introduced, as well as the SDK interface, and I'm also going to show a couple of simple examples to illustrate how you can use this core functionality today to build useful applications. So let's get started — first, let me share my screen. Okay, walking through a few things:

[02:00] first, I'll briefly take you through the CAP. Since this is not a protocol meeting, I'll be really brief and won't spend time explaining the details and nuances — you can read more about them there. So this [CAP-59](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0059.md) is about BLS12-381, a new curve that's been added. Well, it's not one curve — it's rather a family of curves: there's G1, defined over the base field, and then G2, defined over an extension field. What's useful about this curve specifically is that you can compute pairings, which means taking a point in G1 and a point in G2 and pairing them,

[03:00] and that maps into a different point in a higher-degree space. At a high level, why is this useful, you might ask? In cryptography, we want to do computations, but we don't want to do them in a plain manner. ECDSA, for example, utilizes the properties of an elliptic curve — particularly that the discrete logarithm problem is hard to solve, meaning that if you multiply a point by some number to get a different point on the curve, it's really hard to invert that process and recover the number you multiplied by — which is what makes private and public keys work. The same principle applies here: BLS12-381 is just an elliptic curve, and it can

[04:00] be used for signatures, which we'll show in our first example later on — but it can also do a lot more. The high-level gist is that, using the new curve, you can do encrypted computation that follows certain rules, and these are the basic building blocks that people use to build more advanced and interesting applications, such as zk-SNARKs. All right, here are the new host functions introduced — I think the total is 12, if I remember correctly. There are the basic ones: G1 point addition, point multiplication by a scalar, and MSM, which is basically a

[05:00] vector multiplication — multiplying a vector of numbers by a vector of points and then adding them up.
Then there are the hash-to-curve and map-to-curve functions. These are for digital signature schemes: if you want to map an arbitrary message onto the curve — map it to a point so you can perform the cryptographic operations on it — you first have to take that message and hash it onto the curve. Internally, very briefly, there's a hash function, SHA-256, and you also provide something called a DST — the domain separation tag — which is just a tag appended to every message so that your application's domain stays

[06:00] separate from, say, an Ethereum application's. It's always good to have a unique DST for every application, to avoid certain kinds of intricate attacks. Basically, it repeatedly hashes the message to a certain length, corresponding to the length of a point on the curve, then checks whether that point is on the curve; if not, it adds one and repeats. Once the point is on the curve, it multiplies by a cofactor to send the point into the correct subgroup. Getting a little further into the details: these curves are defined over a prime field, so the numbers on this curve are taken modulo a large prime — in this case, the prime
Because the paing result is sent to like a higher degree the 12th degree, that's part of the name BR is 12. So we need to get to the 12th degree extension field, which is a multiplic multiplicative Group, which means you can perform like multiplications instead of a additive Group, which is our G1 and G2. So just some details. But important fact is, that you can these relationships can hold. And then by the extension of it. If you have a u on the left side. If you have a p repeated eight times this is essentially multiplying the result eight times and then, which is equivalent to taking the this paring result the plain par result of p and Q and exponent take the exponent a. And then you can + +[11:00] move this a either you know on the left side or move it out it doesn't matter. So the pairing this pair relationship can be quite useful as we see in the example of both signature as well as a SN verification okay. So I think, that goes over well we just covered the FR, which is the field this is just a very simple like modular arithmetics. But with a very specific module like addition subtraction multiplication power inversion and, that's, that's all. So now let's look at the SDK. So how does this look inside the SDK, which is what you'll be interfacing with is so. If you look at + +[12:00] the SD there's a it sits inside the crypto library. And then there's a brst 381 module. So the all the functions are exposed inside the BRS 12 381 module. So the few additional functions like checking. If a point is in the subgroup. But overall there is a pretty much one to one correspondence between the host function and the SDK function oh and also one interesting to notice is there's a two addition method one is just a plain addition. And then the other one is the checked Edition. So so the reason for, that is. So remember what I said earlier was the BL 12 381 points are defined over a particular subgroup. But but. But just the addition operation + +[13:00] but just the addition operation can perform on any point on the curve doesn't have to belong to the subgroup. But in order for, that addition to be useful for our pairings and for our other like a SN operation like they have to be in the subgroup. But the subgroup check is kind of expensive so. If you have a contract, which does like a number of these additions and only you want to make sure the end result is on the subgroup. Then you can use the plain add function, which is not taking the check it's just point doing the addition whereas. If you wanted to be safe. And then you wanted to use the checked ad, which performs the subgroup check in the end + +[14:00] modifcation yeah these are all very standard yeah repeat for G2 parent check oh yeah okay one thing about the parent check here like I mentioned earlier in this equation this is a parent check it's check checking a pairing of two points equals some relationship of pairing like with a either like a result of a pairing multiplied by each other or like additional terms. But the pairing additional terms. But the pairing result itself is a point on a higher degree in a higher degree extension F12. And then to compute, that F12 is very expensive. So the parent check is intended such, that we can perform multiple pairings together + +[15:00] and. Then check, that this the end result equals to one, which means this pairing returns true or false. 
If it satisfies true otherwise it's false the benefit of, that is, that these intermediate result they can be performed on a much lower degree be performed on a much lower degree, which is much more efficient and only at the very end do you have to or you don't have to. But like internally the algorithm will send, that to the 12's order. And then do the comparison check with the U the number one in 12 extension. So the reason for, that is in most of the applications either in signature checking or in SN verification you checking or in SN verification you only need to do pairing to check certain relation hold like in a SN for + +[16:00] example you are checking, that your polinomial contains a certain structure by, that you are Computing some polinomial relations. And then you're checking, that the left side equals right hand side this relation hold. So you in. Now the point you hold. So you in. Now the point you actually need, that 12 degree extension Point all you have to do is to make sure the left side equal right side. So far I haven't heard of any application, that specifically needs the result of the pairing so, that's why we went with this design of just passing a vector of G1 Vector of G2 perform pairwise Che pairing. And then check the result in the end. If it's equal than equal to one. Then return true otherwise return false. So it's just a it's other line mass is the same. But + +[17:00] just you know something to be like it's not taking a single point pair with a single point. But taking a vector of pair with a vector. And then these ones we covered okay. So I think, that's it for the building block oh it took 20 minutes it's probably should probably speed up a little huh okay. So let's talk about the useful applications U first of all the applications U first of all the BRS signatures. So I think I went through this in one of the protocol meeting earlier. So I just keep it a little more brief. Because the other ones I think more interesting. So the BR signature the way it works I'm trying to find the paragraph, that does explanation. So yeah. So so here there's a explanation. So yeah. So here there's a there's some kind of secret key And then oh sorry this is a public key + +[18:00] okay. So the public key is some number times the point the generator point. And then the signature is point. And then the signature is the public key times hash of the message and by the parent property. If you just. If you do the public key and the. If you do the public key and the hash of the message. And then you move the Scala part around in the end you just you get a check, which is the generator. So the, which is the generator. So the pairing of the public key and the hash of message equals the pairing of the generator. And then the signature. So yeah. If you it's fairly straightforward the public key here sorry I don't know why you call PK it's actually just a secret key here you can move it appears on the left in the beginning + +[19:00] but you can move it on the right. And then this becomes the signature. So the left and right must hold. If this signature is valid and the good thing about it is, that. If you have the same message. And then multiple public Keys signing the message. Then you can essentially just aggregate all the signatures together and aggregate all the Public Keys together do this off-chain and all you have to do is just to verify one pairing. Then you can you know verify potentially hundreds of signatures all at once and this is only this is. 
That works when the messages are all the same; if the messages are unique, then for M messages there will be M plus one pairing checks. So that's the general idea of the BLS signature. Let's see the example. We have it + +[20:00] here, hope you can see the code. This example is already in the Soroban examples repo, so you can look at it today; I've tried to keep it documented and I think it's fairly straightforward. It's pretty much an increment contract, but the increment requires authorization: every time you increment, it calls `check_all`, which checks the signature against the payload and the public key. Inside, it's basically just loading the aggregate public key, loading your domain separation tag, constructing the vectors, and calling the pairing check; that's all it is. Then you can look at the test, which does most of the setup: it contains 10 pairs of keys and signatures, + +[21:00] applies the key and signature aggregation, and then runs the test, which invokes `check_all` with some random payload. If I run that... yeah, the signature passes, and then it dumps out the budget. This simple operation is around 31 million CPU instructions. Remember, this is 10 different signatures on the same payload, so 31 million to verify 10 signatures, and you can do more if there are more. That's a very brief walkthrough of the BLS signature. Now I want to go and do something new and potentially interesting, which is the Groth16 verifier. In + +[22:00] order to show that we have the building blocks to build real ZK applications, I went and generated a very simple proof, and then added a very simple implementation of the Groth16 verifier to verify that proof, and I want to walk you through it. I don't know if we have enough time for me to actually do this. Do we have time? How much time do we have? (Take the time you need.) Okay, because I want to walk you through the steps required to generate the proof and see it in action; a lot of this looks like magic when you first run it, it just dumps out a + +[23:00] bunch of things. Okay. I don't know if you can see the link here, but I'm basically using Circom (circom.io; just search for it, there's the installation and getting-started guide). Circom and snarkjs are a pair of tools: Circom is for defining the circuit, which is the arithmetic relation of what you're trying to compute, and snarkjs is a tool that takes in the circuit, does all kinds of computations, generates the proof, and can also verify the proof. So what we're doing is following this exact guide: define the circuit, do all the setup necessary, generate the proof, and then once we have the proof we will + +[24:00] write our own contract to verify it in Soroban instead of verifying it in snarkjs. Okay, let's see; going to make this full screen. All right, hope you can see both of my screens. I did a little bit of preparation: here's a brand-new folder with these two files, following the exact instructions; it tells you to copy the input and copy the circuit, which I did in advance so we don't have to go through that. The first thing it wants you to do is install the dependencies, which I already did.
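+
+Before moving on to the circuit work, here is a rough sketch of the verification step just described, with the public keys aggregated on-chain purely to illustrate the plain (unchecked) addition discussed earlier; the actual example aggregates off-chain. This is a hypothetical sketch, not the repo's literal code: the group placement, the method names (`g1_add`, `hash_to_g2`, `pairing_check`), and the `neg_g1_gen` constant are assumptions to check against the current SDK.
+
+```rust
+use soroban_sdk::{
+    crypto::bls12_381::{G1Affine, G2Affine},
+    vec, Bytes, Env, Vec,
+};
+
+// Hypothetical aggregate-BLS check: public keys in G1, signature in G2.
+pub fn verify_aggregate(
+    env: &Env,
+    pks: Vec<G1Affine>,   // signer public keys, assumed already subgroup-checked
+    agg_sig: G2Affine,    // sum of the individual signatures
+    msg: Bytes,           // the shared message everyone signed
+    dst: Bytes,           // domain separation tag
+    neg_g1_gen: G1Affine, // negated G1 generator, supplied as a constant
+) -> bool {
+    let bls = env.crypto().bls12_381();
+
+    // Aggregate the public keys with plain additions, skipping the
+    // expensive subgroup check on every intermediate result.
+    let mut agg_pk = pks.get_unchecked(0);
+    for i in 1..pks.len() {
+        agg_pk = bls.g1_add(&agg_pk, &pks.get_unchecked(i));
+    }
+
+    // Hash the message into G2 under the domain separation tag.
+    let h = bls.hash_to_g2(&msg, &dst);
+
+    // e(pk_agg, H(m)) * e(-G1, sig_agg) == 1
+    //   <=>  e(pk_agg, H(m)) == e(G1, sig_agg)
+    bls.pairing_check(vec![env, agg_pk, neg_g1_gen], vec![env, h, agg_sig])
+}
+```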
So we can skip that. The second step is that + +[25:00] it tells you to define the circuit in the Circom language. The circuit is really simple, the simplest you can get: it multiplies two numbers and outputs the result of the multiplication. The interesting part, however, is that the inputs a and b are private inputs, so in the end your proof will prove that this computation was done correctly without revealing what a and b are. The only public output will be c, the result of the multiplication. So what we did here is just copy this exact program into this multiplier + +[26:00] file. The next step is compiling the circuit; the guide gives you the exact instructions for how to do that. This basically just rewrites the circuit in formats that can be understood by the other tools. But there's one thing we have to change on this command, which is to add BLS12-381. The reason we need to specify this is that the circuit compilation needs to know the range of the prime: it needs to take the prime number associated with our curve, + +[27:00] BLS12-381, and use that to generate the circuit. Otherwise, I don't know what happens; I haven't tried, but you should do it. Once you do that, it will generate the multiplier in the R1CS format, plus some other stuff. R1CS is a rank-1 constraint system. Why is it called that? Think of any complex computation, say a² \* b + c, something like that: you can always break it down into simpler computations by creating intermediate gates, assigning intermediate gates to further intermediates, so that in the end each step just combines two things into an output. So the process of R1CS is to deconstruct a complex + +[28:00] computation into something as simple as a left-hand side times a right-hand side equals an output, where the left-hand side, right-hand side, and output are all rank one, meaning the highest polynomial degree is one. You can have a + b, but not a². That's what it is. Okay, once you do that, the next step is to compute the witness. At this point we have the circuit, which is the structure of our computation, but in order for it to be verifiable we have to compute it with some inputs and then generate the proofs and all that. So here I've defined the input, which is exactly what the guide uses: it takes 3 \* 11, so the output will be 33, + +[29:00] but we won't reveal the 3 and the 11. Once we have that, we use the generated JS library (some auto-generated code to compute stuff): what we do here is copy this command and pass in the input. This just generates the witness; you can also do this with C++. Once you do that, you have the witness file. I don't think we can load it yet, but the witness is basically just the + +[30:00] inputs; I forget the exact terminology, whether the witness is the private part or not, but it's possible that what's exposed here is just the public output.
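+
+To pin down the R1CS shape from a moment ago: the multiplier compiles to a single rank-1 constraint over a witness vector, roughly as follows (the column ordering is an illustrative tool convention, not taken from the session output):
+
+```latex
+\begin{aligned}
+w &= (1,\; c,\; a,\; b) \;=\; (1,\; 33,\; 3,\; 11) \\
+\langle A, w\rangle \cdot \langle B, w\rangle &= \langle C, w\rangle,
+\qquad A = (0,0,1,0),\;\; B = (0,0,0,1),\;\; C = (0,1,0,0) \\
+&\Longrightarrow\quad a \cdot b \equiv c \pmod{r}
+\end{aligned}
+```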
So only the c part, 33, is contained in the witness. Okay, now comes the proving part. Here there's a bunch of commands; if you're doing this for the first time it will look fairly weird, because it's telling you to run a powers of tau ceremony. What that really is: in order to compute the proof... okay, remember we have the R1CS file, and the R1CS is just a system of equations over different nodes, where every equation is a simple + +[31:00] rank-one, left-times-right-equals-output multiplication. The next step, in order to turn that into some kind of provable structure, is to convert it into a QAP, a quadratic arithmetic program. What that does is, on each side (the left, the right, and the output), fit a number of polynomials that evaluate to the right values at certain points. The number of equations is the number of constraints, and at each constraint the polynomials are evaluated at a different point, to keep them separate; each polynomial is the aggregation of smaller polynomials defined over each unique node. So what you end up with, instead of left times right + +[32:00] equals output, is a left-side polynomial of some degree matching the number of constraints, and the same for the right-hand side and the output side. Now, in order to verify that these polynomials were computed correctly, here's a key concept: all you have to do is verify the polynomial computation at a single random point. This may sound a little counterintuitive at first, but if you have a polynomial of degree two, there can be only two roots; and if you have two polynomials of degree two and you subtract one from the other, the difference can still have only two roots, which means the two polynomials can agree at only n + +[33:00] points, n being the degree of the polynomial. So even if we have hundreds or thousands of constraints in our circuit, the total space of our SNARK (the prime modulus, or rather the subgroup order) is 256 bits long, so there is no realistic chance of a collision. This is the key concept of how to convert this polynomial check into a non-interactive check: you just need to make sure it's evaluated at a truly random value, and then the verifier, or anyone, can take the output of that and check that the polynomial equality holds, using the pairings we introduced earlier. So that's what this does. The powers of + +[34:00] tau here, long story short, are just the steps necessary to generate that random point at which the polynomials are evaluated; that's really what it is. I'm going to speed through these steps; they're not really that interesting, and there's a lot of output you can look at later. Sorry, BLS12-381, that's the first step; contribute to the ceremony with some random text; it's going to compute and set up the ceremony. Groth16 requires a universal setup, which is global and you have to do once, + +[35:00] and then the per-circuit specific setup; that's the second step here. + +[36:00] And this was the last command, prepare phase two.
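+
+The single-random-point argument above is the standard polynomial identity test; stated precisely (a textbook bound, added here for reference):
+
+```latex
+P \neq Q,\ \ \deg P,\, \deg Q \,\le\, d
+\quad\Longrightarrow\quad
+\Pr_{r \leftarrow \mathbb{F}}\big[\, P(r) = Q(r) \,\big] \;\le\; \frac{d}{|\mathbb{F}|}
+```
+
+With d in the thousands and the BLS12-381 scalar field containing on the order of 2^255 elements, a false pass at a truly random point is negligible.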
Then we just set up the circuit-specific stuff, and contribute another random text. Okay, so now we have the proving key and the verification key set up for this circuit, and the next step is to generate the proof. In the end we have this proof, + +[37:00] which contains three terms; these are like encrypted evaluations of the polynomials at some point. There should also be the public.json. Do we have that? Oh, I think I sent it to the previous folder, but the public output should just have one value here, which is 33. So, to verify the proof, what we need to do is write a contract, and the guide makes this really easy for you: there's a step you can call to generate an Ethereum Solidity contract for verifying, and then + +[38:00] ... yeah, okay, if we just look at it here: the contract itself is not that interesting. I'm not too familiar with Solidity, but most of the logic is just packing bytes into memory, and in the end it calls the pairing on a bunch of terms. So we can easily convert that into a Soroban contract, and our contract is just this much. It verifies the proof: it takes a verification key, which is some G1 and G2 points; a proof, which also contains points in G1 and G2; and the public signals, in this case just one, 33, encoded as a scalar field number. Then it aggregates the public inputs, + +[39:00] constructs the vectors for the pairing check, and that's really it. I copied all of the outputs; it will be different from what we generated earlier because of the randomness, but I did this earlier and copied the proof part and the verification key part (this is the proof, and this is the VK), constructed the key, constructed the proof, then just called the verify-proof function and asserted that the result must be true, because we're putting in 33, which matches our public output. So let's run this test. Okay, it finished, and you can see that verifying the ZK proof takes around 41 million CPU + +[40:00] instructions. It performs four pairings plus some additions and multiplications in the G1 space, so if you have more public inputs there will be more multiplications and additions here, but there are only ever four pairings, so the total CPU is roughly this amount. I think that's it; sorry for taking longer than expected, but this is it, and hopefully you can see that this is exciting stuff you can use to build ZK applications today. You may ask what the use of it is: the really interesting part is in the circuit. Instead of proving a, b, and c, you can write a much more interesting one, like proving the computation of a hash, + +[41:00] or knowledge of a path in a Merkle tree, which gives you proof of possession or something like that. So a lot of the innovation can happen on the contract side, which is what you all are building. I'm really looking forward to seeing what comes out of it, and let me know if you have questions; we also have a Discord thread, feel free to post there. And I'll hand it over to Carsten. Great, thank you so much, this was really good; I think the examples and walking through the code really made it easier to understand and see the potential.
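+
+For reference, the relation such a verifier checks is the standard Groth16 equation, consistent with the four pairings reported in the budget (the example contract's variable names may differ):
+
+```latex
+L \;=\; IC_0 + \sum_{i} x_i \, IC_i,
+\qquad
+e(A,\, B) \;\stackrel{?}{=}\; e(\alpha,\, \beta)\cdot e(L,\, \gamma)\cdot e(C,\, \delta)
+```
+
+Negating one side turns this into a product of four pairings that must equal one, which is exactly the vectorized shape the multi-pairing host function accepts.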
So I know we have one question, let me see. Elliot was asking earlier whether the host functions are all that's needed for ZK-SNARKs on Stellar, or whether there's still additional functionality missing. I don't know if you have an answer for that? Yeah: + +[42:00] to verify the SNARK, it's all we need. There are some quirks in the SDK; we built the most generic interface to expose this, but there are small quirks. For example, earlier you saw that in order to store the points I had to store the bytes instead of the G1/G2 types, but that's fixable; and to compute the negation of a point, you currently have to do it on the contract side. In terms of verification, though, all the tools are there. There may be some UX gaps, which I'm discovering and should be fixing soon, and with your help we will be able to make it better. But in terms of verification, that's it. In terms of proving: as you saw, proving has to happen off-chain, and that's quite an elaborate tooling setup, + +[43:00] and there are different suites, different frameworks (Marlin, Groth16, PLONK, and all the other tools you can use). We haven't provided any of that and we don't endorse any of it; I was just using snarkjs as an example because it's one of the most well known. And then there's also the public ceremony aspect; we don't have that on-chain. So you can verify on-chain, but there's no proving toolchain available at the moment. Okay, great. Any other questions? Feel free to put them in the chat. I don't think that's the case, and we are kind of running out of time, so + +[44:00] I will save the walkthrough of the quickstart for the next meeting. If there are no other questions, then I think we will stop right here. Let's see... yeah, okay. Well, thank you, Jay, for your presentation, it was really good. Going forward, when we add new functionality to the protocol, when we upgrade the network, I think it makes sense to spend some time in these meetings going over some of the new functionality and some of the things we can do as developers. So yeah, look forward to more of this kind of presentation. Thank you, Jay, and thank you to everyone who joined. Have a good holiday; thank you guys, happy holiday, bye! + +
diff --git a/meetings/2025-01-16.mdx b/meetings/2025-01-16.mdx new file mode 100644 index 0000000000..cc999bdde8 --- /dev/null +++ b/meetings/2025-01-16.mdx @@ -0,0 +1,151 @@ +--- +title: "Ecosystem DevRel Year in Review and 2025 Plans" +description: "This overview highlights Soroban smart contracts, automated market makers, and network fees and resource limits." +authors: [carsten-jacobsen] +tags: [developer] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +In this special edition of the weekly Stellar Developer Meeting, the Ecosystem DevRel team met in person during an offsite to reflect on the past year and share what they’re focused on next. The conversation emphasized how Soroban’s first year on mainnet, improved onboarding tooling, and stronger ecosystem visibility have reshaped what it feels like to build on Stellar. + +The team also discussed practical priorities for the year ahead—growing developer adoption, increasing on-chain activity, and making core tools easier to use through better docs and educational content. Several “fun-first” experiments (like on-chain games) were highlighted as a way to drive engagement while still teaching real Soroban patterns. + +### Key Topics + +- Team introductions and highlights from the past year (Soroban launch, Meridian, ecosystem momentum) +- Documentation efforts to move and expand Soroban docs within the main Stellar documentation site +- Focus metrics for the year: growing monthly active developers and increasing daily contract invocations +- Using Dune analytics to track Soroban activity and interpreting spikes driven by “fun” on-chain experiments +- How lowering friction with tooling can drive usage: + - `LaunchTube` enabling fee sponsorship and simplifying transaction submission on mainnet + - Passkeys and smart wallets enabling smoother onboarding and signing with biometrics +- Plans to make adoption easier via clearer documentation, guides, and examples around key tooling (Passkeys, `LaunchTube`, smart wallets) +- Interest in expanding “playful” ecosystem apps (games/activations) as educational demos and catalysts for chain activity +- Discussion of fine-grained permissions and automation ideas: + - Policy signers + bots/agents for safe delegation (e.g., automating repetitive DeFi actions) +- Ecosystem reflections: + - DeFi growth and visibility improvements (e.g., making Stellar/Soroban activity more accurately represented on major dashboards) + - Supporting builders through mentorship/incubator-style programs and helping new teams ship quickly +- Community Q&A topics: + - Upcoming event activations (teased) + - Dev meetings being uploaded and organized via a YouTube playlist + - Core team engagement primarily via Discord for discussions and feedback + +### Resources + +- [Soroban Daily Contract Invocations Dashboard (Dune Analytics)](https://dune.com/stellar/stellar-smart-contract-analysis) +- [LaunchTube repository](https://github.com/stellar/launchtube) + +
+ Video Transcript + +[00:00] Maybe move that box a little bit this way... I don't think it makes much difference. But okay, hello everyone, this is a very special edition of our weekly meeting: with me today I have the entire DevRel team. So hello, and to kick it off I'll hand it over to Nicole. Hello everyone! I'm so excited to be here with the entire DevRel team. We are in San Francisco having a DevRel offsite and talking about all the really cool, fun stuff we have planned for this year. And so we thought, while we're here all together, we should get on this call, say hi to everybody, introduce ourselves, and talk about a couple of things we really loved last year and some of the things we're excited about this year. So I'm really excited to be + +[01:00] here and I'm excited to have everybody here with us. I guess I'll kick it off. Hi everyone, I am Nicole, the manager of the DevRel team; I've been here at SDF for almost two years. Let me start with what I loved about last year, and then I'm going to go around and let everybody else introduce themselves. Last year, two highlights for me were Protocol 20, Soroban (sorry, nobody else can take that one), and Meridian. They were really awesome: it was really cool and fun to just be there, see the builders, and see all the amazing projects that came out over like 24 hours. So I'm going to go this way, to Elliot. + +[02:00] Hello everybody, I'm Elliot, I'm your friend. I'm so excited just to be a part of this. I've been around Stellar for a long time and I still feel so lucky that I get to do this for work; it's amazing. One of the things that we are excited to do... ah, not yet, yeah. + +[03:00] It's frozen. Dead for me. We're frozen. Oh yeah, the frame rate looks kind of janky. Can you hear us? Weird. Okay, they said we're back. All right, nice. Yes, the monitor; yeah, I'll just do this the whole time. Okay, so they did see me waving. All right. Oh, they want to zoom in; I don't know how to do that. We can do that: control the camera frame. People, we're experimenting live, really badly. Hey, we don't know how these cameras work. Real tight on Ty, yeah. Hey + +[04:00] everybody, I'll move it to whoever's talking if you just let it do its thing. All right, let me bring people back in. Okay, Elliot, are you done? Yeah, I'm done, sorry. Okay, go ahead. Hi everybody, my name is Bri Wylde, I'm a writer on the DevRel team; I've been around for three years and I love it here very much. I know Nicole took the Soroban launch, which is a very exciting thing, but one thing we did last year that was not very glamorous or fun: Elliot and I worked a lot on bringing the Soroban documentation into the Stellar documentation site. So lots of docs work, and making sure everything is updated in the documentation with Soroban being on + +[05:00] Stellar. And something that I really enjoyed is KALE, so if you all are not farming KALE, you should be; that's probably one of the most fun things we've done. (She's just saying that now that she's on my leaderboard. I am number 10 on the leaderboard.) So that's it for me. Hey everyone, I am Chris Anatalio. Last year I escaped the Ethereum EVM ecosystem and joined Stellar in July, and I'm definitely not looking back. I am biased, but the community here is way more authentic, way more legit. If you go to most Solana or Ethereum Discords, people are asking "when airdrop," you know.
But in the Stellar + +[06:00] ecosystem, people are really helping each other, people are building real things, people are genuinely excited about authentically building on the ecosystem, and that's a really awesome change for me. I guess last year one of the big things was putting Stellar back on the map: we had a big presence at a lot of major events like Devcon, people were talking about Stellar, and we were getting a lot of exposure in the media and in the crypto Twitter sphere; lots of great activity around Stellar. We're actually at the dev offsite right now talking about what's next, and + +[07:00] we will let you know soon. But yeah, thank you all for being a part of the community. My name is Carsten; I'm a developer advocate on the DevRel team and I joined in July last year. I would say the one reason I joined was Soroban; at least, that was why I got super interested in Stellar. Adding smart contracts just made a huge difference for me: all of a sudden it was a really interesting chain, you could build whatever you wanted, the sky was the limit. So I thought it was a really interesting time to join. I love to see what developers are building, and I love to help them in any way I can. Meridian was super exciting last year, getting to meet a lot of the developers; lots of interesting talks, and it was a + +[08:00] great experience to network with a lot of the builders. Yeah, Tyler? Hello everyone, I'm Tyler, I'm also a developer advocate here; I've been here for five years. Last year a highlight for me was... well, we knew pretty early on that we wanted to add the secp256r1 signing curve, which doesn't mean anything to anybody unless you're a little more technical and understand that's the signing curve used for passkeys. Taking that journey from super technical all the way through to "now you can sign transactions with your face or your fingerprint," and having to build all the technology to facilitate that, was a huge highlight for me. I also have a long history of trying to give people XLM, and it's very hard to do + +[09:00] without going through a whole long process of KYC and collecting tax docs, when I just want to give away XLM and let people play games. LaunchTube was huge; it kind of killed two birds... well, we don't kill birds, what did we say, fed two birds with one seed. With a LaunchTube token I can give it away without having to do any KYC or tax-doc collection, and it allows developers to submit transactions on mainnet using the XLM held within that LaunchTube token. They can't farm the XLM out of the token and take it off to do nefarious things, which is why we normally have to do KYC and collect tax docs, and that is awesome. But then there's also the problem that when you actually want to do something on mainnet, you've got to source XLM to pay those fees; and sure, submitting transactions on Stellar is really cheap, but you still need + +[10:00] to have that XLM. So it doesn't really matter how cheap it is if you have to create a Coinbase account or a Binance account and figure out a way to connect your bank.
And then you've got to wait, and that journey from "I want to go to mainnet" to actually being able to do it is pretty tricky, until you introduce something like LaunchTube, which basically lets you get transactions on chain for free and handles all of the coordination of paying the fee and simulating the transaction. Super neat service; I was really excited to get that online last year, and it's something I've been trying to press and work towards for a long time. So those are definitely my two big highlights from last year. Awesome. All right, what about this year? I get to go first, which means I get to take the best ones. So this year we have a ton of plans and ideas, and we're going to share + +[11:00] them as they develop. But for me, as we're at this offsite thinking and making some of those plans, I'm really excited that this is the first year we're starting off with smart contracts on Stellar. Obviously we had most of last year, but we can hit the ground running this year with smart contracts, and there's a lot of really fun stuff I think we can do, that we want to do, and that we're planning to do. And I think I'm going to share my screen... + +[12:00] Check, can you hear us? Audio back? Mic check. All right. Confirmed, can you hear me? All right, let's just keep it here; we'll share that link. Got it. Okay, so his audio goes with my screen share. Okay, big takeaway: more monthly active devs coming in and sticking around. That is one of our big numbers; number go up, and I think it's really + +[13:00] exciting. So that's number one. Tyler, number two? Yeah, the other number we're trying to push through the roof is daily contract invocations. I think I can share a link, since I may not be able to screen share; is that the one right there? There's a Dune Analytics dashboard, and this is the graph I'm most interested in this next year: pushing up daily contract invocations. I want to see Soroban be very active. There are a lot of different ways to do that, and a lot of reasons you might want it to happen, but the technology is awesome, and if you look at that graph you can see it hasn't been used a whole lot. We were hitting an average, maybe before November, of about 2,000 or so invocations per day, which isn't terrible, but + +[14:00] there's certainly a lot of room to grow there. Then you can see this massive spike that happens in November-December. That kicks off when Enrique launched his FCM proof-of-work contract, which I think was one of the first fun, goofy things to do on Soroban. To me that really pointed to this hunch: if you introduce some fun things to do, not just the serious, grown-up blockchain things, but fun, goofy things with stories and lore and whatnot, you can actually get a lot of activity. So I also built a proof-of-work asset that addressed some of FCM's choices; it just had a different philosophy of how to grow and introduce people to the platform, and that's where these numbers really start to take off. You have a miner you could use the CLI for (that's kind of the first phase), and then I introduced the website for + +[15:00] farming KALE, and it just goes through the roof. You can see it's starting to taper off.
Now, these fun activations often have a pretty short life cycle: they spike up, people have fun, and then they're ready for the next thing. I've likened it to going on vacation or going to a water park: you're not going to stay there for the rest of your life; you're going to go, have fun, be done, go home, and then do something fun another day. The sort of things I like to work on are flashes in the pan, lightning in a bottle: you work on something that's fun, see what works and what doesn't, then you iterate and work on something else. So I'm definitely working on my next big idea to try to keep pushing these numbers up, and my challenge to the developer ecosystem is to also think of fun things to do on chain and make 2025 a year where we're really trying to push up these daily contract invocations, which make the chain look very active and + +[16:00] engaged. When you combine those two things, monthly active devs and daily contract invocations, those are really good numbers, numbers that will attract more and more attention; activity begets more attention and activity. Combining those two things will help us win against other chains. When you dig into an ecosystem a little more and want to do more serious things, it's often quite hard, because the relationships with traditional web2 technology and traditional finance haven't been built. We've solved that problem; what we need to solve is attraction: developer attention, just getting developers to build stuff on chain. There are lots of great things you can do, and now we just need to bring the attention and build the water park, as it were. One fun little side note (I can't share my screen, so I can't share this one): on this particular graph it says that of almost 1.5 million total smart + +[17:00] contract invocations, 43% came through LaunchTube. So almost half of all of our smart contract invocations went through LaunchTube, which to me again points to this idea that when you make it really easy for people to submit transactions on chain without fiddling with XLM, combined with smart wallets where you don't have to download anything (no 24-word passphrase and all this nonsense, you just click a button and you're on), you get a lot of users. KALE has only been around for a while, and it's like, what are we even supposed to do with this game? It doesn't make a whole lot of sense, but people love to play it. Make something a little more interesting and this number will just continue to go up and up, because with tools like the Passkey Kit, smart wallets, and LaunchTube, getting on chain and doing something there is very easy for users. Now that we have those tools in + +[18:00] place, it's time to build; we just need to build fun things. Sweet, awesome. So monthly active devs and invocations per day: those are our two main goals as a team. Again, I'm really excited about the fact that we can hit the ground running in this new, exciting way, and that we've got all these resources, these tools Tyler's built, that we can extrapolate from and build upon as a team and as an ecosystem in general. What is everybody else excited about?
A lot of developers tend to have brilliant and creative ideas, and my strategy is to wait for them to have those and then snatch them. Tyler has done a lot of really cool stuff, a lot of really amazing tools and projects, really creative and innovative things, and my plan very soon is to try to break some + +[19:00] of those things. I'm going to build some projects; not on his, I'm going to break my own. Duplicate, modify, or build on top of the things he's building: like a KALE marketplace where you can exchange different produce, maybe depending on how much you're willing to stake, different invoker contracts, that kind of stuff. Just generally having fun messing around with the things he built and trying to show other people how they can do it too. Sweet, awesome. I can go next... oh, Carsten, go ahead. I'm actually going to speak a little to what Elliot and Tyler said, because for Elliot to become successful in building things based on LaunchTube, passkeys, and all these things, just like any other + +[20:00] developer, I'm sure he would appreciate a lot of good documentation showing how to use all these great tools. That's something I'm excited about: taking some of the really cool ideas and projects (passkeys are a game changer, in my opinion) and making them easy for developers to adopt, so it's easier for Elliot to do all his fun projects; you can just go and read the documentation. So that's actually something I'm really excited about: getting these technologies more accessible for developers, making adoption easier and faster. We need to get these brilliant ideas out there and have them used by developers; they should not just end up in a repo somewhere, they need to get out and get to work. So that's what I'm excited about: I can take some of the ideas Tyler has and some of the projects he's built and + +[21:00] document them, and show developers how they can do something similar, and hopefully that'll inspire them to build something different, maybe based on some of these ideas. So yeah, I just felt it was natural to jump in here. Perfect, cool. I'll go next. I feel like the elephant in the room for all of web3 has always been that self-custody is a non-starter for most users: it's too complex, and honestly passwords aren't secure; the number one password is 123456, you know. So passkeys are a game changer, and all the major companies are implementing passkeys: GitHub, Google. Once you lower this onboarding friction, people are going to start coming in, and once you lower the development friction, + +[22:00] people are going to start building. With the Passkey Kit, with LaunchTube, with all these amazing tools that are facilitating and enabling developers to build, it's going to open the floodgates. And what's going to come through those floodgates? I think it's going to be a lot of DeFi protocols. Some people say TVL is kind of a vanity metric, but honestly I think in web3, DeFi is going to enable whole new business models; for instance, in web2 the only two ways you monetize are ads and subscriptions.
Once you have DeFi as a foundation, you enable all of these new, amazing use cases on Stellar, and that's really what it's all about: unleashing the creativity of the developer workforce and just + +[23:00] seeing what you all build. I'm super excited to see what new things, new models, new paradigms people build on DeFi, built on chain, totally changing the game, because Stellar has a lot going for it: it's super fast, it's super cheap, and it's the real-world blockchain; our on/off-ramp ecosystem is way better than any other chain's. So I'm just super excited to see what people build; I can't wait. Once we put these tools in your hands, you all will build some magic. Excited to hear from Bri. A lot of things, I guess. I'm a writer, so I'm really excited about bringing + +[24:00] awareness to all the really cool things that all these amazing people in this room, and the ecosystem as well, are building. I did write a piece on Blend recently that was really fun. Talking about projects, how projects are working together, and putting together blog pieces to bring awareness to the really cool things being done is what I'm really excited about; and hopefully continuing the KALE lore, because I'm obsessed with KALE and I'll just write more. So yeah. Tyler, I think you already said what you were excited about, right? Yeah, I'm excited to see the daily invocations go up. I really want to see a passkey smart wallet get an actual interface and get audited. We're using them on mainnet and we're holding KALE in them, but I want to see one of these + +[25:00] actually get audited and get much wider use. I want to see a lot of fun things to do on chain orbiting around these passkey wallets, with an ecosystem that grows up around them and builds some standards for how to interoperate between websites; passkeys are not terribly intuitive to figure out how to share, so we'll need some coordination and standards. But I think 2025 is the year the G-address dies and the C-address reigns supreme. Not everyone agrees with me, but they're wrong, because the C-address is going to take over the world and I'm here for it. All right, well, that's a prediction. Another thing we thought we might do as a team in the same room: we are very close, I think a month and a couple + +[26:00] of days, from the anniversary of Soroban on mainnet. So we thought we would each share a favorite story from the last year: a cool development, projects we saw come into the ecosystem that were awesome. Who would like to start? I won't take the first one. Well: whoever submitted that first transaction on Soroban mainnet, that was so fun and smart of them to do, and I wonder who that was. I just thought it was amazing; within literal seconds of the protocol going live. They really worked hard to prepare for that moment and had waited many years for smart contracts. I don't know who that was, but kudos to them for doing it, and for putting a little poem on chain as the first + +[27:00] transaction. It's funny, because Tyler really likes to be first, and sometimes he's not first, and that is like the most fun,
when he comes in second. Please! Yes. What else will we share? Oh, favorite memories. The other thing I think was really cool was Blend launching, because I've followed their story so closely, since way back when we were doing Turrets and using JavaScript VMs as a sort of L2 to try to figure out a way to do smart things on Stellar. Following their journey from when I first joined as a contractor at SDF, way back when they had found Stellar and were trying to figure out how to do some of their advanced stuff (they were still in college, and super smart, trying to figure out how to do what they wanted on Stellar), then working + +[28:00] through the Turret smart contract protocol, then actually pushing for a real smart contract protocol, convincing the network that we needed it and should build it, then working through to actually see that go onto mainnet, then seeing Blend launch on mainnet with people actually using it, and now there's TVL there. Seeing that narrative come all the way full circle and go live, after years and years of watching that team and working with that team, was a really cool moment for me. Awesome. Everyone else: stories, favorites from last year, one year of Soroban? Something that I think has been a really good experience is that we have worked with an incubator in collaboration with the Stellar Community Fund, and + +[29:00] a couple of us have been mentors for some of these teams. We've seen them come in, some of them without any experience writing smart contracts; they had an idea, something they thought they could build, and in a matter of days in some cases, a couple of weeks at most, some of them had their first smart contracts up and running. They're building, and they're really enthusiastic. When you consider that Soroban is not even one year old and we had the documentation, we had sample code... we can't give the team that worked on Soroban enough credit for that. In a lot of other cases it takes years to build out documentation, sample code, and a support system, and to be able to really do something substantial. I + +[30:00] came in later, so I can't take any credit for it, but I just think it's amazing to see developers get up and running, writing smart contracts, deploying their contracts to testnet and to mainnet within a few days of the first time they ever hear about Soroban. So I think that's a really cool thing, and I have a lot of appreciation for all the work that went into it, up to the launch of Soroban. Yeah, that's awesome. I think a big narrative for last year was the on-chain effect, having narratives. If you're familiar with the story: there's an entrepreneur in San Francisco who was able to extend the opportunity to be a remote personal assistant to people in Colombia who live in a cash- + +[31:00] only village, and they were able to hold their savings in stablecoins on Stellar and get money out in their local currency through MoneyGram, via our integration. That was a narrative I saw repeated on LinkedIn and on Twitter by non-web3 people; I saw people in web2, in traditional tech, even non-tech people, echo the story. It just makes sense to normal people; it's a story you could tell Grandma over the Thanksgiving table and she'd understand.
Seeing a narrative, a story that actually resonates with non-crypto people and shows a real use case, like "this is what you can do on chain," was amazing, and it was only possible on Stellar. That was one of the big highlights for me last year: seeing the real potential of crypto + +[32:00] other than meme coins, other than pump.fun (not that there's anything wrong with meme coins; fun is fun, people love fun). Okay, I guess one thing that's been really fun, as I've been able to write about Soroban, is learning why Soroban's developers made certain decisions, such as state archival and how metering works for fees, that sort of thing: how the platform is being built for sustainability, learning from the experiences of other networks. That's been really cool to learn about and gives me a lot of faith. I just think the developers who built the platform are amazing, making these + +[33:00] kinds of hard decisions and talking about them, and it has been really fun and interesting to see, and also really fun and interesting to write about. So yeah. That's awesome. I feel like I'm just going to echo what everybody else is saying, but there were moments of crystallization of these things, like the hackathon at Meridian: everybody who was there sat in that room, didn't sleep, and made stuff overnight that was really awesome. We had a lot of the team present on passkeys and run through workshops, and then a lot of these teams took Tyler's Passkey Kit and built things with it by the next day, and that was really awesome. As far as a Soroban + +[34:00] narrative goes, that was pretty cool. One of the things I've had my hand in over the last year is trying to get Stellar's representation on places like DeFi Llama and CoinGecko more prominent and more reflective of what's actually happening on the network. If you look at our TVL (perhaps a vanity metric, perhaps not), it at least shows to some degree the utilization, what people are putting onto the network. If you look at DeFi Llama's TVL: Soroban went mainnet on February 20th, and at that time our TVL was right around eight million or so; a week later it doubled, almost overnight, to 16 or 17 million, and now we're at about 60 million. Obviously smart contracts were something the community was hungry for, and we listened + +[35:00] and we did the thing, and, surprisingly, people are using it. Oh my gosh, they said they wanted to use it, and they're actually using it; who knew? It's just so exciting to see all the work we put into it validated and actually going toward use cases, doing things people are going to take advantage of and use. Yeah, it's awesome. And when we say "we," it's the entire ecosystem, crucially. All right, we had a question about ETHDenver. I don't want to scoop anybody, so this isn't going to be an announcement.
But there will be something really fun at ETHDenver this year, is what I will say, and I think somebody did share the link. It may involve cards; there were chips; yeah, a poker + +[36:00] game was shared on the Build on Stellar Twitter. I don't know how to interpret the risk level, "shark infested"; I don't know, but details are dropping soon, so stay tuned; we are not going to scoop anybody right now. Are the sharks going to have lasers? Stay tuned; we're not sure yet. Yeah, I think we can maybe take some questions from chat, if there are questions or comments, things people are thinking about, things you want to see or want to do in the ecosystem; let us know. Or if you need an address to send all your tokens to. Oh, the DocsDAO? + +[37:00] Yeah, I don't know. Well, I guess: what does everybody think about the DocsDAO? Bri is giving a very committed "maybe"; that's a strong "we'll have to see." No, I mean, I loved running it, and it was good; it's a good way to get documentation done and get community and ecosystem involvement, which are all wonderful things, but there is a lot of overhead in administering a program like that. What will Tyler tinker with next? Yeah, that's a good question: after passkeys aren't the hot thing anymore? Passkeys are always goodies. Heard of AI + +[38:00] agents? I might be tinkering with that; I like AI a lot, and I just talked about this at a coding-with-KALE session. Anything but AI agents! Yep, sorry, Matias. Why is Tyler going to work on AI? Because Matias doesn't want me to, right? I have a life mission. AI agents, because I really like the idea of making bots that can do things for me so that I don't have to, particularly with Blend. Here's my use case, what I'm trying to pursue: I like Blend a lot, and I really don't like claiming my emissions or rebalancing between the different pools. So I want to build a bot that will handle all of that for me, by + +[39:00] reading inbound transactions, or reading the current state of all the pools and the current state of my balances in the emissions pools, and seeing if it's time to move between different pools, without me having to do any of it manually. So I want to build an AI agent that can handle all of that for me. When you combine things like the smart wallets with the policy signers and AI agents, I think you can pretty easily build some tools that are, in this case, custom built for Blend, but would be pretty easy to customize for a bunch of different contracts you can think of, KALE or whatever, where you could chat with it, or you could run it in some sort of cron job where the prompts are data being fed in. An AI prompt doesn't have to be written English: it can be JSON files, or, if you've got a parser, it could be the metadata from a + +[40:00] transaction that then feeds into some sort of outcome, like rebalancing or filling orders. There are so many interesting things that could happen when you combine these smart wallets (where you no longer have to have a human involved in signing things) with policy signers, so you don't have to worry about giving too much permission to some third-party service. You can very tightly bound what they're allowed to do: only claim emissions, only for me, only on the Blend protocol, only for a certain amount of money in a certain period of time. Give that to an AI agent that can
then interpret those rules, know its limitations, and start to act on those rules for the benefit of the user. That type of stuff is really interesting to me. It's a lot of experimentation, but that's definitely something I'm interested in working on, and then just more KALE-type games. I've got one of those I'm working on right now; hopefully it'll be here sometime in quarter one. + +[41:00] We'll see. You have another question: how do you feel about... well, apparently, according to StellarExpert, if you hold just the tiniest amount of KALE you're a billionaire. So you could show her that: look at all the money we've made. I'm still waiting for someone to actually start market making between, like, FCM and KALE, or to create a whole separate farmers market. I want someone to make a farmers market, because that's so funny: we have DEXes and market makers and AMMs and all of these things, but what's the most fundamental base market you could have, from the dawn of time? A farmers market, where I can sell my KALE for chickens or other vegetables or coffee or, you know, someone's beadwork. + +[42:00] It's just asking to be turned into some sort of farmers market game, so someone should do that. And then your imagination can run wild, because we've been throwing around ideas that would be really cool to see: if we have a farmers market, could you pick up ingredients for, you know, a really nice soup? I don't know, the best kind of soup; virtual soup. Yeah, exactly: KALE soup. I've been trying to convince Tyler that leaf kale, like real kale, is delicious. It's a disgusting vegetable, only good as a digital representation and farming futures contracts. Somebody should make a marketplace, and somebody should make recipes for KALE and other vegetables. A KALE soup stand! + +[43:00] Yeah, I think building all of that, like the marketplace, is a lot of fun, and it's fun to play with; but if we break it down, we could actually spin off a lot of cool sample projects to show how this is built, and I think there's a lot of educational value in doing something like this. It sounds super fun, like we're just having a good time here, but there's actually a lot of learning that goes into building something like that which we can pass on to the community. So even though these sound like fun and crazy ideas, there's a lot of learning in them and a lot of knowledge to be extracted from them, and I think that's going to be one of our big jobs in 2025: taking all these fun ideas and actually turning them into knowledge and educational content. So it's not just fun; there's a meaning to all of this. We have talked a lot about doing fun stuff, + +[44:00] doing fun games, doing fun educational stuff, and learning should be fun. Yeah, it's a great point. The KALE contract, along with the FCM contract, actually has a lot of really good learnings in there. I spent a lot of time, when I was writing that contract, making it really efficient, so there are a lot of very technical decisions I made in order to keep the cost of using KALE low, so that it wouldn't overwhelm the network and drive up the cost of a lot of stuff, but could be this thing that could scale and get a lot of traction without affecting the bottom line of the network too much.
So there's actually a lot of stuff in there that could be really useful for anybody building contracts to employ in maybe more serious use cases. Good thing to note. Yeah, other + +[45:00] questions: collaborations with other community nonprofits or DAOs? I feel like that's maybe a little more on the ecosystem growth side, but we did partner with Funding the Commons, which is a big one for public goods. Definitely more of an ecosystem question, but yes, there are future plans. Any other questions or ideas, by the way? Any spicy questions? What, another jalapeño? That's not a hot pepper. More like a green pepper; a mild question. + +[46:00] The hot-take speed round! I already gave mine: G-addresses, adios amigos; the year of the C-address. Everything growing in 2025; stay tuned. See, we can't answer anything; it's so boring. [Laughter] You don't ask for questions... one more question: where's the alpha? Oh, you want some alpha? Stay tuned. I already dropped mine: we're going to be working on another fun thing. I think they're working on docs V3. + +[47:00] Oh my gosh, we're going to delete the docs; everything's going to be AI-generated. There's going to be this automated person that talks to you and answers all your questions, all live. That would be wild, though: you go to a docs site and there's a person that talks to you and answers your questions. "Hi, my name is..." what's that Iron Man character? Jarvis. "Hi, I'm Jarvis, how can I help you build Soroban contracts?" Here's one for the year of the C-address: could it be helpful for contracts to interact with the DEX? It could be helpful. I don't know what the team is thinking about the G-to-C compatibility question, how those things interact. There's lots of debate between the current ecosystem with G-addresses and the new, growing ecosystem with C-addresses. It's a good question, but I don't think there are a whole lot of answers here yet, until we + +[48:00] start to see how the ecosystem adapts to smart wallets, policy signers, and DeFi: where the liquidity goes, how different markets pop up that either take the place of the DEX or move faster than the DEX. There's definitely a lot of interest; you've got folks on one side saying we're going to remove the DEX, and people on the other side saying the DEX is essential and will never go away. So lots of outstanding questions and ideas. And you can't just remove stuff; there have to be better things to put in its place, and that just takes time to develop and do the research for, to see who your partners will be, who would implement those things and help with the design, the implementation, the use cases, and the wallets. So it's never been a better time to build, but there are also a lot of outstanding questions about what the future actually looks like. Questions about the future: where is + +[49:00] Meridian going to be? [Laughter] What team do you think we're on? That's not our team. Well, I don't know. If there are... there is one more question: SPEEDEX? Yeah, I'm not sure about the status on that. I know they're still talking about SPEEDEX types of technology, but I'm not sure where they currently are; it's not where I spend my time, so I'm not sure, but there's still research happening towards it, I think. I do know (I don't think it's SPEEDEX-related) that there are some CAPs they've got open for the next upcoming protocol releases, and I think they're planning on doing some of those meetings on Discord; I'm not sure if they'll be streamed to Twitch as well.
But keep an eye out for some of those it's a great place to kind of see where the core team is thinking + +[50:00] about some of these types of questions of where are we goingon to go next what updates do we need what's the what's coming next for Stell my one of my favorite Meridian highlights to answer ruger's question was watching Tyler's Hy presentation as he talked to his pre-recording self and said well thank you Tyler in his beautiful podcaster voice as he presented between his live demo and his a lot of comments about, that I don't feel like many of them they were like just underhanded like oh, that was interesting choice of you to pre-record everything yeah maybe we'll try to do it live. And then the internet will out and everyone will just be sitting there watching me fumble. And then we'll see who's laughing the dev meeting back on + +[51:00] YouTube wait do we do Dev meetings on YouTube oh like putting them up on YouTube yeah it is the we have actually started uploading it there's a playlist on the St YouTube channel there playlist for us meetings and this one will come up shortly too. So yes 2025 year of De meeting on YouTube [Laughter] oh yeah the playlist is up right. Now but we are going to stream it as, that's the plan to stream it both on Twitch and YouTube at the same time there you go cool all right well thanks everyone oh best way to reach the core team get have discussions Google group or something else show up with the SF office on the door DM kale no, that will not + +[52:00] work they're in Discord right A lot of them are in Discord again you don't Pi the route, that everybody else takes it's talking in Discord and asking specific questions being clear I can always get the attention, that you might work all right cool yeah coming to you live from the orus it's the never thanks everyone thank you we'll see you all around + +
diff --git a/meetings/2025-01-23.mdx b/meetings/2025-01-23.mdx new file mode 100644 index 0000000000..f74e56eecb --- /dev/null +++ b/meetings/2025-01-23.mdx @@ -0,0 +1,238 @@ +--- +title: "DeFi Risk Analytics Automated Liquidity and Soroban State Archival Changes" +description: "This overview highlights Soroban smart contracts, Soroban RPC, and Stellar Core." +authors: + - bastian-koh + - carsten-jacobsen + - garand-tyson + - siddharth-suresh + - timothy-baker +tags: + - developer + - spotlight + - CAP-57 + - CAP-62 + - CAP-65 + - CAP-66 +--- + +import YouTube from "@site/src/components/YouTube"; + +## Hoops Finance Update: Risk Analytics, Pool Rankings, and API Keys {#part-1} + + + +In this week's meeting, Hoops Finance founders Bastian and Tim shared a product update focused on making DeFi liquidity data easier to understand and integrate. They walked through a redesigned UI that ranks pools across multiple protocols and introduces a risk scoring approach intended to make liquidity pools more “human readable” for users evaluating opportunities. + +They also demoed an authentication flow with developer API keys, plus early work on “AI-friendly” endpoints that return compact, schema-described responses meant for tool consumption (not LLM-generated answers). The longer-term direction discussed was automated liquidity management via account/strategy contracts, with audits and staged rollout planned before enabling real funds. + +### Key Topics + +- Hoops Finance UI refresh with simplified navigation across Pools and Tokens views +- Pool aggregation across multiple ecosystem AMMs (e.g., Aquarius, Soroswap, Phoenix; Blend integration in progress) +- Pool rankings and analytics surfaced for decision-making: + - APR, volume, TVL, fees, and protocol/pair details + - Proprietary pool risk score built from multiple inputs (including social/market signals), with ongoing tuning +- Data quality and validation plans: + - Cross-checking against other ecosystem data sources + - A future validation system intended to improve correctness guarantees +- Tokens view for liquidity/volume leaderboards and quick inspection links to explorers/contracts +- Auth + developer tooling: + - Discord-based login + - API key generation for developers + - Interactive API playground and documentation + - API keys expected to become required for browser/client-side usage (origin-based controls) +- “AI endpoints” design: + - Compact/truncated outputs to reduce token/overhead + - Include schema alongside data for easier parsing + - Emphasis on preventing hallucinations by keeping responses grounded in API data +- Roadmap direction (high level): + - Automated liquidity strategies via account contracts with installable strategies + - Strategy-driven rebalancing, reward claiming, and liquidity movement + - Swap aggregation/adapters to normalize interactions across different pool types + - Wallet/passkey-based signer support planned, with passkeys positioned as the long-term UX path +- Launch posture: + - Avoid encouraging real-money usage until contracts are audited + - Expect staged feature releases (e.g., swaps/liquidity first; automation later) + +### Resources + +- [CAP-0057: Soroban State Archival Proofs](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0057.md) +- [CAP-0062: Partial State Archival](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) +- [CAP-0065: Reusable Soroban Module Cache](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md) +- [CAP-0066: 
In-Memory Soroban State](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) + +
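One of the key topics above is the "AI endpoints" design: state the schema once, truncate values, and let the tool consumer rehydrate rows. Below is a minimal TypeScript sketch of what such a payload could look like; every field name, value, and shape here is an illustrative assumption, not Hoops Finance's actual API.

```ts
// Hypothetical shape for a schema-carrying "AI endpoint" response. All names
// below are illustrative assumptions, not the real Hoops Finance API.
interface AiEndpointResponse {
  schema: {
    description: string;
    fields: Record<string, string>; // field name -> meaning/units
  };
  data: (string | number)[][]; // rows of bare values, labels stated once
}

const example: AiEndpointResponse = {
  schema: {
    description: "Top pools by APR",
    fields: {
      pool: "truncated pool contract address",
      pair: "asset pair",
      apr: "annualized percentage rate",
    },
  },
  data: [
    ["CA3D...K2FM", "XLM/AQUA", 12.4],
    ["CB5T...9QXW", "XLM/USDC", 8.1],
  ],
};

// A tool consumer (e.g., an LLM tool call) rehydrates rows into labeled
// objects using the schema instead of paying for repeated JSON keys per row.
const fieldNames = Object.keys(example.schema.fields);
const labeled = example.data.map((row) =>
  Object.fromEntries(fieldNames.map((name, i) => [name, row[i]])),
);
console.log(labeled); // [{ pool: "CA3D...K2FM", pair: "XLM/AQUA", apr: 12.4 }, ...]
```

The design choice this illustrates: because the labels live once in the schema, per-row token overhead stays low, which is exactly the "compact/truncated outputs" goal described above.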
+ Video Transcript + +[00:00] Welcome everyone, this is this week's Stellar Developer Meeting, and today it's going to be a little bit of a different schedule than usual. We will start with a presentation here from Hoops Finance, and then about half an hour from now we will switch over to Discord, where the core team will give a CAP proposal presentation. But yeah, welcome, Bastian and Tim; you guys are well known from our Discord, and I'm super excited to hear what progress you've made with Hoops Finance. So please take it away. Thank you so much, Carsten. We are super excited to be presenting in the dev meeting to try to get some eyes on our project. In our new update we redid the entire UI from scratch to improve + +[01:00] user experience and to make it easier for developers, and for retail users in the future, to start using our application. So this is our landing page at the moment; we are working on launching a new one as well. But the goal here is to make human-readable risk analytics for blockchain. The idea of risk analytics is that we think liquidity pools are very scattered and have untapped potential, and I believe Stellar really has that potential hidden away. We can really unlock it if we provide clear and transparent data on these liquidity pools and different offerings. So let's go to the app and see what we have here. We just pushed an update, so hopefully it doesn't break... there you go. This is our new UI. Right now we have two main pages, which are the pools page and the tokens + +[02:00] page. In the pools page what we're doing is aggregating different automated market makers from ecosystem projects that came from the Stellar Community Fund; in this case we have Soroswap, Aquarius, Blend, and Phoenix. We're still working to add Blend to the UI, but it's working through the API; this is something that should be out quite soon. Through our UI, on the pools page, you're going to be able to see these rankings, where we rank the best APR pairs, the best volume, and the best stablecoins. Obviously this is data from our indexer, so it should still be used with caution, but we are working on creating a validation system to ensure that the data we're showing is always accurate, and we're trying to do that with other projects in the ecosystem so we can always compare and ensure that everything is correct. For example, here in the table we can see different liquidity pools from the + +[03:00] ecosystem and important data that would help retail investors, and really anyone that wants to participate in DeFi, get an idea of how they work and why they would want to participate in one liquidity pool over another. For example, here we have the native/AQUA pool, which is XLM and AQUA. We have our volume chart and some overview numbers: 24-hour volume, total value locked, fees, and whatnot. We have a proprietary risk score that we generate from different data points, as well as analysis from social media and the internet, that gives us an idea of how risky a pool is. There are many factors that affect the risk score, and we are continuously improving it. But hopefully this will be essential for us to be able to launch, in the near future, what we want + +[04:00] to do: smart savings accounts, or automated liquidity management. The tokens page is very similar: we have top tokens by volume, by liquidity, and our top stablecoins, and we can see information about these and go straight to the explorer to check these tokens, so we can check their contracts and information on them. There are a few things here and there that we've got to fix, of course, but basically this dashboard is going to provide the Stellar user, and really any user in the future, with a one-stop shop to save, to invest, and to analyze their investments long term, which is really exciting. Another thing that we've been working on is creating an authentication flow, which I think is very important, so we can get our users logged in and provide them with API keys, which we were just working on right now, so crossing my + +[05:00] fingers everything is good. If you sign up with Discord, for example (we use Discord a lot in Stellar), we can create this account, and as you can see we get a profile where we can go to our developer account and create an API key, which we're going to call "stellar". When we create this API key, we get it over here, we can copy it, and then we can start querying our API to get all this information, maybe in future projects that people in the audience are working on, possibly in the Stellar Community Fund, possibly from other projects or other chains. The idea, again, is that all this data is transparent and available not just through our dashboards but also through our API. It looks a little bit like this; it's pretty well documented for you to start playing around with. + +[06:00] At this moment I believe you do not need an API key to play with the API, but it's always good to create an account and get your own key early. Basically, you have here a playground to try out getting different metrics from different protocols. For example, Aqua over here is going to give us a response: it's going to tell us the pairs found in the Aquarius protocol, with their IDs, their fees, LP token supplies, and whatnot. And while this is very early, we are working on our AI endpoint, which is quite exciting: how can we talk to these metrics like a human would, and have this AI basically transform all this information into something more parsable, like asking, "Hey, what's the best APR pool today?" and the AI tells us just by querying the API, giving us + +[07:00] that information in a way that is understandable. Obviously this is still in the works, and we're making sure that the information provided is always correct and always coming directly from our API, and that the AI doesn't hallucinate. But I think this is really exciting, and we can see it being implemented in other protocols. Just to add on that hallucination point: those are endpoints to be used by large language models, not generated by a large language model. Absolutely. So yeah, it would be easy for us to integrate this into other protocols or even into wallets, for example. In the Stellar ecosystem there are many wallets, so having this little assistant that can query these metrics very quickly and just give you an understandable answer could, I think, create quite a disruptive change, if I may. + +[08:00] But yeah, I'll leave the floor to Tim; you all know him. He'll explain a little bit more of the technical details of what's been worked on. Yeah, take it away, Tim. You're muted. Oh wonderful, that's good, can you hear me now? Right, yes. Bastian was talking a bit about the risk metrics. If you look at the get-statistics endpoint, you can see the various actual metrics and weights that we make the score from. I plan to show those on the front end in various ways; right now it's just basically the aggregated scores, but we offer a bunch of different scoring + +[09:00] methods for different pools. There's also a way to generate those for tokens; right now we only offer it for pools publicly, but we plan to add the token statistics soon. You mentioned that the API doesn't require an API key right now; that's correct. However, it would require a key if you were interacting from a different origin, so if you wanted to use it on the client side of your website. Also, keys will probably be required in about a week from now, so just a heads up on that. The AI endpoints: some of them are basically the same thing as the other endpoints, but they return the values in a much more compact and truncated way, so that if + +[10:00] you were using this in a language model (and I've got a little bit of experience with that), you don't have a bunch of excess data. If you look at a normal API response object in JSON, it's got, for example, the label of the field on every value, and you really don't need that. So the AI endpoints are self-describing: they return a JSON Schema along with the data, so that a large language model is able to know what the data is and parse it. It does things like truncating the addresses, because it doesn't need the whole address most of the time; you can change those truncation behaviors with URL params and different + +[11:00] options. Those options are not yet documented, because that system is still in development. As far as data validation: we plan to use it while simultaneously building Soroban contracts that allow you to use this data more or less as a controller to make automated trades. I don't know if we showed the portfolio page, but basically you would set up an account contract that you then install strategies to. Those strategies tell the account when to make trades, when to move liquidity, things like that. You set a base value or a base currency on the account; right now that is only XLM, USDC, or EURC, but in theory it could be anything. And then the account's goal is to generate + +[12:00] yield: it earns rewards from AMMs, claims and reinvests those, and automatically moves your liquidity between different markets based on the rules that you set in the strategy. You can also make manual strategies, where you tell it exactly what pairs you want to use, at what ratios, and how often to claim rewards, things like that. That system uses some of the data from the API, which is all calculated using transactional data that's on chain. And so what we plan to do is more or less have an asset in the protocol where, in order to stake that asset, you need to run a validation set on the data and then publish proof of that to the chain. + +[13:00] That is still very much in development, so we're trying to find ways to keep as much data off-chain as possible while still being able to accomplish what we want with that system. Basically, it uses on the back end a swap aggregation system that aggregates liquidity pools but can work with many different types of liquidity pools and different protocols, and gives them all the same interface using adapters, so that our account contracts don't necessarily have to specify what pool they want to interact with; they can derive it based on information that's on chain. We're also working on a chatbot + +[14:00] system that will help you develop different strategies and answer questions like what's a good market to invest in, what market has the highest volatility, things like that. I'm not sure what else to show. You mentioned the auth system; the auth system's pretty cool. On our back end we have our own... we're like a single sign-on provider to this website. It's sort of client-side auth using access tokens and refresh tokens, but we've made it in such a way that it can be integrated using a system very similar to SEP-10 to link your Stellar account to your Hoops account. So the + +[15:00] signer for your savings account contract can be a passkey or a traditional wallet, basically. You have to create your account using either your email or a social provider, but once you've created your identity on our back end, you can then link wallets to it and log into our website through either a passkey (that passkey ends up being the signer on your account) or through your wallet, which could also be a signer on the same account. Yeah, we really believe that passkeys are essential moving forward, just to ensure a better user experience. As we go along, for everyday folks, keeping a bunch of wallets is difficult, and so is remembering those private keys. So + +[16:00] passkeys, we believe, are the future, and we're looking forward to implementing this on mobile as well, with a simplified version of the app. Oh yeah, to be clear, all the Stellar auth and the passkey auth we have disabled right now, because we are developing this system against mainnet, and all the data is from mainnet, and we don't want people to be risking real money until we've got the contracts ready for release, to the point where everything's been audited and we know it's safe to use with real money. We may offer a testnet version at some point; it's a little bit hard, because we'd have to build all the protocols on testnet as well in order to do that, and then generate traffic through all those protocols, which for most things isn't needed, + +[17:00] since we have live data to use and you can basically simulate most things. So we leave you with a call to action, really: just go to the Discord; under projects we have Hoops Finance. Try it out, try to break things, ask questions, and let us know what you would like to see in the app and what you think would be best. And yeah, we're working on making authentication as strong as possible, so we can start keeping track of these users and start helping new projects and new developers actually use this infrastructure that we believe could really change and shake things up within Stellar. That's really great; it's impressive to see how far you've come already. I was just wondering, do you have any idea what the timeline looks like? + +When are you planning to go live on mainnet with the protocol? + +[18:00] I would say this summer, but it depends on when we are ready for the audits and then how long the audits take. We're actively working on it; there are parts of it that may go live on mainnet sooner. We're trying to build it in stages so that it can basically be used as we develop each feature. However, that's probably going to be limited to swaps and liquidity deposit/withdraw initially, with some of the automated strategies then being added in, if that makes sense: more or less trying to launch what we can that we know is safe, while still making it so that the system will fit together when it's live. The actual token + +[19:00] issuance is probably going to come once the account factories are audited and live, because it sort of integrates with the treasury accounts that control the token. So yeah, it'll take a little bit, but we are focused right now on getting all this onto testnet. We kind of have to build around a little bit to create these new pools and everything, but testnet will be out soon. I don't want to give dates, maybe earlier, maybe later, but testnet will be out so we can start seeing how this works behind the scenes. And to answer Twitch: I did not connect to Twitch, so I cannot chat, but yes, a Hoops wallet or something of the sort is on the roadmap as well. I had worked on a wallet myself in the past, many months ago, so we're + +[20:00] probably going to revisit that. I imagine it's going to be something of an app that allows you to interact with your account contract easily, to do sends and withdrawals, you know, standard wallet stuff, even though it is a Soroban contract, and some way that it could allow you to use funds from that to interact with other parts of Stellar besides Soroban. Yeah, when we think about a mobile wallet, I ask you to think about passkeys; I'll just leave it at that. It sounds great. Really, at the end of the day, if you have your passkey issued from your phone, then your phone really is the wallet, so it's quite easy to make the system work across multiple devices, just because of all the work that's gone into passwordless auth already by so many other + +[21:00] people. Exactly, and it goes hand in hand with the idea of smart savings accounts. Okay, just a quick question: the UI looks amazing, and what we've seen here these last few minutes is looking very promising, and then you have the API that's really well documented. So how is this going to be used? Is it more going to be used through the web interface, or is it more like a developer solution, or both? What are your thoughts about that? I think there'll be a lot of end users, but I think there will also be enterprise customers. I know we've talked to certain other projects in the ecosystem that have been interested in using some of the + +[22:00] scoring data inside their apps, for profiles about projects. And I figured, since we need all this data in order to do the stuff we want to do with the automated accounts, we might as well make it easy for other people to build on top of it as well. And I imagine there's also a good business opportunity in the normalized data that we provide. So yeah, it's kind of both. + +I'd say most people will know it through the web interface; developers, we want to encourage to use the API; and people that have been in Stellar DeFi for a while, to use this tool. + +[23:00] And as we move along, more people are being onboarded to Stellar every day, so we want to have the basis for this becoming a retail application, so that everyone is really able to use it and get to this ideal of a universal savings account, which could serve as a retirement plan, which I believe is something that needs to change in the coming years. Yeah, our goal is to make an account that's a yield-bearing account, that's permissionless basically, that anybody can have and manage themselves, while at the same time our application helps. So you could build on top of it and make your own interfaces; I think that's good for everyone. Absolutely. + +[24:00] Thank you. We are unfortunately running out of time. It was really great to see all the progress here and some of your thoughts about the future of the project and what you're working on, and I'll encourage everyone to go take a look at it and, as you both said, go find some errors, go test it out. Yeah, poke it. Yeah, I would probably not use the data in anything live right now, just a heads up; there's no way we can promise everything, because of the audits: there haven't been any audits done yet. And it's under very active development; we merge to main quite often, and every time there's potential breakage. So many sleepless nights, for sure. Thank you so much, Carsten. I think it could be fun to do an update in a few months and + +[25:00] see where you are, because this looks very promising. Yeah, we're moving pretty fast; I think we'll have plenty of updates. Okay, great, and thank you to everyone who joined. Right now we're going to switch over to Discord, where the core team is going to give a presentation about some of the proposals they are launching related to Protocol 23. So see you over there, thank you, and join again next Thursday. Bye-bye + +
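The demo above walks through creating an API key and querying protocol metrics (for example, the Aquarius pairs). Here is a minimal TypeScript sketch of what such a key-protected call could look like; the base URL, path, and auth header are placeholders, since the transcript doesn't specify them, so consult the Hoops Finance API docs/playground for the real values.

```ts
// Minimal sketch of calling a key-protected metrics endpoint from TypeScript.
// The base URL, path, and auth header are placeholder assumptions.
const BASE_URL = "https://api.example-hoops.dev"; // placeholder
const API_KEY = process.env.HOOPS_API_KEY ?? "";

async function getProtocolPairs(protocol: string): Promise<unknown> {
  const res = await fetch(`${BASE_URL}/protocols/${protocol}/pairs`, {
    headers: { Authorization: `Bearer ${API_KEY}` }, // assumed auth scheme
  });
  if (!res.ok) {
    throw new Error(`Request failed: ${res.status} ${res.statusText}`);
  }
  return res.json(); // pair IDs, fees, LP token supplies, etc.
}

// Usage: list the pairs indexed for the Aquarius protocol, as in the demo.
getProtocolPairs("aqua").then((pairs) => console.log(pairs));
```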
+ +## CAP-62 Partial State Archival and CAP-66 In-Memory Soroban State {#part-2} + + + +This recording covers proposed Soroban state archival and performance changes planned around Protocol 23. The core theme was separating live Soroban state from archived state to unlock faster execution, fairer fees, and a smoother restore experience—without immediately requiring full “delete-from-validators + proof-based restore” mechanics. + +The proposal introduces partial state archival (evicting expired entries from the live state into a validator-maintained hot archive that is not yet deleted), then leverages that separation to cache live Soroban state in memory. A major UX improvement discussed is automatic restore during `invokeHostFunction`, while still keeping incentives to manage TTL and rent costs. + +### Key Topics + +- Why state archival matters long-term, and why “full archival” is being deferred for now +- CAP-62: Partial state archival + - Evict expired entries from live state into a hot archive database + - Validators keep the hot archive (no deletion/merkle-proof requirement yet) + - Designed as a stepping stone toward eventual full archival +- CAP-66: In-memory live Soroban state + - Live Soroban state cached in RAM due to a soft cap enforced by fees/rent mechanics + - Classic state remains on disk (no rent/archival yet → runaway RAM risk) +- Fee model refinement: + - Proposal to base Soroban write fees on live Soroban state size (not total classic + Soroban) + - Rationale: classic state dominates size today; Soroban fees should respond to Soroban usage +- Resource model changes: + - Split reads into in-memory vs on-disk resources + - No in-memory read bytes limit (still capped by entry count + CPU metering) + - Disk reads still metered/limited for classic state and archived-state access +- Automatic restore: + - `invokeHostFunction` can restore archived footprint entries automatically + - Reduces multi-tx flows and improves UX, but restores are not “free” (disk reads/fees still apply) +- Limits and edge cases: + - Automatic restore may not cover scenarios where too many archived entries exceed on-disk limits + - In those cases, explicit restore/TTL extension flows remain necessary +- Operational/implementation notes raised in Q&A: + - Ability to detect live vs archived status during simulation via captive-core/RPC querying + - Upgrade-time concerns about reinterpreting config settings and initializing new targets safely + - Why exposing SQL-like query primitives directly from core DB isn’t the direction (bucketing + specialized storage) + +### Resources + +- [CAP-0062](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) — [Discussion](https://github.com/stellar/stellar-protocol/discussions/1575) +- [CAP-0066](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) — [Discussion](https://github.com/stellar/stellar-protocol/discussions/1585) +- [CAP-0057: Soroban State Archival Proofs](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0057.md) +- [CAP-0065: Reusable Soroban Module Cache](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md) + +
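The Q&A summarized above touches on detecting an entry's live-vs-archived status during simulation. Under today's (pre-Protocol 23) semantics this can be approximated with the existing RPC `getLedgerEntries` call by comparing an entry's `liveUntilLedgerSeq` to the latest ledger; a minimal sketch using `@stellar/stellar-sdk` follows (the `rpc` namespace naming follows recent SDK versions, and under CAP-62/66 this information would instead surface through the new captive-core endpoints exposed via RPC).

```ts
import { Address, rpc, xdr } from "@stellar/stellar-sdk";

// Sketch: check whether a contract's persistent instance entry is still live.
// An entry whose liveUntilLedgerSeq is behind the latest ledger has expired
// and would need a restore before (or, under CAP-66, during) invocation.
async function isInstanceLive(contractId: string): Promise<boolean> {
  const server = new rpc.Server("https://soroban-testnet.stellar.org");

  // Ledger key for the contract instance (persistent durability).
  const key = xdr.LedgerKey.contractData(
    new xdr.LedgerKeyContractData({
      contract: new Address(contractId).toScAddress(),
      key: xdr.ScVal.scvLedgerKeyContractInstance(),
      durability: xdr.ContractDataDurability.persistent(),
    }),
  );

  const { entries, latestLedger } = await server.getLedgerEntries(key);
  if (entries.length === 0) return false; // not found on this RPC node

  const liveUntil = entries[0].liveUntilLedgerSeq ?? 0;
  return liveUntil >= latestLedger;
}
```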
+ Video Transcript + +[00:00] The first is partial state archival, [CAP-62](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md), and then in-memory Soroban state for [CAP-66](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md). And no, Tom, I will never stop using an eccentric Linux distro. Cool. So I guess before I get started, a little bit of background. Protocol 23 is kind of where the rubber is going to start hitting the road as far as state archival is concerned. When Soroban launched, we of course had the interface for state archival, with rent and all that sort of stuff, with the intention that eventually entries that have run out of rent would be archived and then removed from validators in order to free up space, so you don't have the issues that come with large amounts of state that have to be maintained. That's where we're going. Today, the interface is such that you still pay rent, you still have to issue restores and all those sorts of things, but + +[01:00] the data is not actually removed from validators yet. So initially, the plan for Protocol 23 was to have what we're calling full state archival. In full state archival, entries, once they have run out of rent, are removed from the live state and added to this temporary data store called the hot archive. It's still maintained by all validators, but it's just a separate database that maintains entries that have been recently archived. The thinking was that eventually this hot archive would become full, and when the hot archive is full, you would create a Merkle tree of that data, validators would maintain the Merkle root and then delete all the information in the hot archive, and you would just repeat this process iteratively. So essentially you evict entries from the live state to the hot archive state; + +[02:00] eventually that hot archive cache will become full, and then you'll actually delete and remove those entries from the validator. And then for the restoration process, once an entry has been archived in this way and no longer lives on the validator, there's a Merkle-style proving scheme with which you are able to restore an entry back to the live ledger state. So that's what we consider full state archival, where entries actually get deleted from validators. But thinking about this problem a little bit more and looking at the current metrics of Soroban, it seems that we're still a little too early for full state archival. Long term, if you look at smart contract platforms that have large amounts of state, there are significant issues with maintaining all that state: you have big hardware requirements on a network like Solana to maintain large caches, and then you have networks like Ethereum that don't have big hardware requirements but are very slow, just due to maintaining these very large + +[03:00] databases. So long term, at scale, I think it's still very important to have the full state archival solution, where entries are deleted from validators and then restored via proving schemes. But the reality of the situation is that we're not quite there yet; I think there is currently less than a gigabyte of Soroban state live on Stellar.
And so going through all these hoops and adding all this complexity for a proving scheme just to delete a small amount of data isn't really worthwhile at this point. That's why for Protocol 23, instead of going the full archival route where we actually delete entries, we are doing, or I'm proposing, something called partial state archival, and this is what [CAP-62](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) explains. In partial state archival, we do the first half of full state archival. We still maintain two different databases on the validator: you have the live bucket list, which contains + +[04:00] all of your live state, which is the ledger that exists today, and then you still have what's called the hot archive, which is a cache of recently archived entries. Whenever an entry runs out of rent, it is evicted, removed from the live state, and added to the hot archive database. Now, the key distinction with partial state archival is that even though you still remove things from the live bucket list and add them to the hot archive, you never actually delete anything: the hot archive never becomes full, so entries are never actually removed from validators. It's partial state archival because you are still storing live state in one database and archived state in a different database, but you're not actually removing any state from validators. My current proposal for Protocol 23 is to implement partial state archival, with the intention of later + +[05:00] extending this to the full state archival solution. The size at which the hot archive becomes full and gets deleted is configurable, and I think we could do something reasonable: we could implement this state archival solution but set the capacity of the hot archive to something very high, like 50 GB, such that it would take a long time and a lot of network activity to actually start deleting state. And then, if the network were to grow that much, such that we had 50 gigabytes of archived Soroban state, it would actually make sense to start deleting state and requiring proofs, for all the operational benefit we'd get there. So that's the current proposal of CAP-62: we're going to keep maintaining all information on the validators, but we're going to move archived information from one database + +[06:00] to a different database. Now, the reason we want this separation of archived state and live state is that it opens up a large number of optimizations, and that's what we get into in [CAP-66](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md), which is in-memory Soroban state. With this system, the live bucket list holds all classic information and all live Soroban information, and we have a completely separate database that stores all the archived state. Because of the rent system, and because of the way we do write fees (where a write fee is a function of the total size of the bucket list), we actually have a way to put a soft limit on the amount of live Soroban state at all times. The reason is that, with the current bucket list size,
if you were to add enough live state that + +[07:00] you go beyond the target bucket list size, writes become very expensive, such that network users are incentivized to allow entries to run out of rent and become archived. So because of the way our fee system works, we essentially have a soft limit on the amount of state in the live bucket list at all times. What I'm proposing is to change that fee slightly, so that instead of the Soroban write fee corresponding to the size of the entire live ledger, of both classic and Soroban entries, it only applies to the live Soroban entries. Essentially, the bucket list target size, instead of being a total bucket list size, would change to just be the live Soroban target size. I think this is much fairer, given that classic entries don't actually have to pay rent yet; it's a little unfair that adding classic entries changes the write fees and the rent fees for Soroban. Especially as the network exists + +[08:00] today, classic state dominates Soroban state size, so changes in Soroban usage don't really affect Soroban write fees; rather, changes in classic usage affect Soroban write fees. By changing the bucket list target size to a Soroban state size instead of total state, we get a much fairer fee system. But what that also allows us to do is prioritize live Soroban state above archived state. What do I mean by that? If we change the way we calculate fees to only look at Soroban size, we can use the fee system to enforce a maximum amount of live state at any time. For instance, we could set the target Soroban state size to 1 gigabyte, and then the fee system would ensure that there's not much more than 1 gigabyte of live Soroban state at any given point. Now, you could maybe go a little bit above that if people are willing to pay expensive fees, + +[09:00] but the way the fee growth works, you are reasonably capped to a small amount of state. Because we have this system where the amount of live Soroban state at any given time is bounded, what we can actually do is just store all Soroban state in memory and not have disk access at all, and that's the current proposal in [CAP-66](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md): to keep all live Soroban state in memory. This is made possible because we store live Soroban state in one database and archived state in a different database; by splitting the state into two separate databases, we can very easily iterate over the live database and store all that Soroban state in memory. That's what's happening behind the scenes on the validator. Now, we're able to do this because of the maximum Soroban state size. If we didn't have that, and if + +[10:00] Soroban live state were able to grow unboundedly, this would be a very dangerous optimization, because validators might run out of RAM. But because of the state archival system, we can actually bound the amount of live state, so there's no runaway RAM risk, and we can very reasonably store all Soroban state in memory. Now, there are some changes we need to make to the developer experience and the user experience to make this possible. First, we are going to change some of the resource types a little bit. Currently, we only have one read resource, which is read bytes and read entries, and this assumes that all the information you're reading is on disk.
What [CAP-66](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) proposes is to split this into two different resource types: an explicit in-memory read resource and an explicit on-disk read resource. The reason we're doing this is that even though all live Soroban state is held + +[11:00] in memory, Soroban contracts can still access classic state, and classic state needs to be on disk: because classic entries aren't subject to state archival, they have the runaway RAM risk, so we can't store classic entries in memory, and Soroban contracts will still have to pay disk fees for the classic state that exists. Additionally, we're only storing live state in memory, so if you access archived state, for example via a restore operation, you would still have to do disk reads, and there's a disk fee for that. Essentially, what would change is that there would be a network limit for the maximum number of on-disk read entries as well as for the maximum number of in-memory read entries. That being said, because in-memory reads are a lot cheaper than on-disk reads, we can pass a lot of that savings down to the user. In this proposal there would actually be no in-memory + +[12:00] read bytes limit; the read limit for live Soroban state would just completely go away, because in-memory reads are cheap and there's no reason to limit them. We would still limit the total number of entries being read, but the bytes being read would not be limited. Additionally, because we're not doing disk access, there would no longer be a read fee associated with accessing Soroban state. You still have to pay an instruction (CPU) count and things like that to actually process large amounts of data, but because we're not going to disk, there doesn't need to be an explicit fee or resource for it. So essentially, for live Soroban state you don't have to pay for reads, and you can read as many bytes as you want; you still have to pay for the CPU, though, so there's an implicit fee but no explicit read fee. That's the first advantage of the in-memory versus on-disk resource split. The second thing this allows us to do is + +[13:00] implement auto-restore functionality. Previously, when we first launched Soroban, we weren't sure what the final state archival proof system was going to look like, so while from a technical standpoint there was no reason to require a separate restore operation and a separate invoke host function operation, we did that just to give us flexibility later on, in case the proof system turned out to be very involved. But in [CAP-57](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0057.md) we've actually outlined a pretty lightweight proof system that works with invoke host function. So what we're going to do in [CAP-66](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) is allow automatic restore, which means you will no longer have to issue a restore operation prior to your invoke host function; your invoke host function operation will just automatically restore any archived keys that are in the footprint. This reduces the transaction + +[14:00] count required, reduces fees, and should just offer a much better user experience.
Now, the way this works with resources: like I mentioned before, live Soroban state is all cached in memory in one database, and archived state is uncached and on disk in a separate database. If you call invoke host function and every entry you're using is currently live, you get the free in-memory resource bytes and you don't have to pay for disk. That being said, if you're using automatic restore, the entries being restored come out of the disk read bytes and are charged disk fees, because, again, entries that are archived and live in the hot archive database do have to be read off disk. So, at a high level, the TL;DR of what we're proposing, after I've been talking for a + +[15:00] little bit, is: archived entries live in their own database, and live Soroban state lives in a separate, live database; we are then going to cache all the Soroban state in the live database in memory, in order to pass that savings on to you; there will be an in-memory read byte limit and an on-disk read byte limit and fee; and finally, there will be automatic restore to essentially remove the need for the restore operation in most cases. So I guess, are there any questions or any conversation points we'd like to touch on more? Looks like there's a question in the chat box: "I think a lot of dApps extend TTL by default; will that still be necessary?" Ah yeah, I think so. Just because we + +[16:00] have automatic restore doesn't mean that you don't want to still manage your TTL. Like I mentioned before, if all the entries that you're using are currently live, then you don't have to pay read fees and you have much larger read limits, so you are still incentivized to pay rent. The issue is, when you restore something, you have to pay write fees for the restoration, and you also have to pay disk read fees for the restoration. So from a fees perspective, if you're using an entry a lot, it's still in your best interest to extend the TTL to save money. Just because the restore is automatic does not mean it's free; you still have to pay for that restore, and even if it's the same invoke host function, invoking a + +[17:00] function that only accesses live state is significantly less expensive than invoking a host function that has an automatic restore on the front end. So we still definitely want to extend TTL. Let's see... oh, from OrbitLens: will it be possible to tell in advance, during simulation, whether an entry will be automatically restored? Yes. This is more of the implementation details, which are included in the CAP, but captive core has recently added a couple of HTTP endpoints for querying ledger state that will be used by RPC in order to simulate transactions correctly. Essentially this endpoint is a high-performance, multi-threaded HTTP endpoint with performance similar to SQL table queries, so it should be appropriate for production use cases. What this + +[18:00] endpoint does is a key-value search: for every key you provide, it will tell you if that key exists; if it exists, it'll give you the value, and it will also give you meta information about that key. So it will give you the ledger entry, it will tell you if it's live or archived,
and it will also tell you what its current TTL value is and whether it's in memory or on disk. The captive core endpoint is intended to be the new entry point for this information, so you should be able to query the current archival state and the current in-memory versus on-disk state of any entry directly via captive core. Again, there's also meta that we're emitting for all these events, so if you wanted to, it's theoretically possible to ingest meta and maintain the state of Soroban entries that way; but if you don't want to do that and create your own SQL tables and pipeline, you can just use the captive core HTTP + +[19:00] endpoints. So, will automatic restore become automatically available for existing contracts? Yes. This is all handled at the RPC level. Essentially, what's changing with Protocol 23, and this is detailed in [CAP-66](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) specifically, is that we are changing the footprint to have a field where you distinguish whether a Soroban key is in memory or on disk. What the validators will do is, whenever they receive and apply an invoke host function, they will look at the footprint, and for every Soroban entry that is marked as being on disk, a.k.a. marked as being archived, before running that transaction + +[20:00] the validator will restore those entries automatically. The actual contract and the contract logic will not change, which means all deployed contracts are automatically compatible with this. The invocations of those contracts will change slightly because of the footprint changes, but again, this will all be handled by RPC, and preflight will do all this automatically. Let's see, other questions... oh, ledger streaming mode. I'm not sure about the context behind enabling or disallowing meta streams on validators versus captive core instances, but I imagine it has to do with performance reasons: you don't want ingestion to make a + +[21:00] validator fall out of sync. Essentially, that config setting is an opinionated way of saying that a validator should be high performance and never get blocked, whereas a watcher node that's not participating in validation would be more appropriate for observing and ingesting the meta, because it doesn't depend on a downstream system. If the meta stream gets clogged because the downstream system isn't ingesting fast enough, you wouldn't want to lose sync and have a validating node fall off the network because of a downstream issue. Cool. So I guess + +[22:00] next, from George: the CAP mentioned somewhere that auto-restore won't always be possible; can you elaborate on those scenarios? Ah yes, okay, thank you for pointing this out. There are a couple of edge cases where an invocation will still require an explicit restore operation. Essentially, because the in-memory reads are so much cheaper, they don't have limits like the on-disk reads do: while there is no read byte limit at all, and while there is an entry read limit, the expectation is that this limit will be significantly higher than the disk limits. Just for example, suppose that in Protocol 23 the transaction in-memory read limit is 40 + +[23:00] entries and the on-disk read limit is 20 entries, and say you have a DEX trade that will access 40 Soroban entries.
If all of those entries are live, then it's within the limits and the invocation works, no problem. But say all 40 of those entries are archived. Even though the in-memory limits are large enough for that transaction to succeed, the automatic restorations come out of the on-disk limits. Because you have to pay disk fees and are subject to the disk limits for the restore operation, you can only restore, in this example, 20 entries automatically, even though you need all 40 to be live to complete this DEX trade operation. In this scenario you would still need to manually submit a restore + +[24:00] operation, just because of the way the limits are set: you can't fit that many restores in a single transaction. That being said, especially given some other exciting work that's happening in Protocol 23, we expect to raise limits pretty significantly across the board, so I suspect that this edge case will not affect most transactions; it will only affect very expensive transactions. For instance, if you have a DEX trade and it's trading assets that are mostly live, you won't really be affected. You're only going to be affected if you have a DEX trade that's crossing a ton of orders and, for some reason, all those orders were archived. So you mentioned that the restore op could be deprecated + +[25:00] because of the automatic restore, but this edge case requires you to keep something like that around, right? Yeah, I think I mentioned in the CAP that we may deprecate the restore op, and that's just because, if the footprint is automatically restored, having both the restore op and the extend TTL op is kind of redundant. For instance, say you just want to restore something: you don't actually need two operation types. You could essentially use the extend TTL op, put all the keys you want to restore in the footprint, and then just set the TTL extension to zero, and this is functionally equivalent to the restore op. So when I mention deprecating the restore op, I don't mean deprecating the ability to restore entries via an explicit transaction; I just mean, mechanically, do we need both the restore op and the extend TTL op, when, in theory at least, both could do a + +[26:00] restoration as well as an extend? Okay, yeah, that makes sense. Nico had a question about how the Soroban state size is initialized at upgrade time; it's not specified in the CAP. Yeah, I think I need to expand on this a little more. Part of this CAP is that we are changing the semantic meaning of a network config setting: in particular, the bucket list target size will become the Soroban state size. Now, the issue is that currently the bucket list is like 11 or 12 gigabytes, so all of our network settings assume that the target size is like 13 gigs. But protocol upgrades previously have never actually changed config settings, so if you just do the protocol upgrade, all of a sudden, instead of your baseline for fees being 12 gigs with a target of + +[27:00] 13 gigs, because we're only tracking Soroban state, your target is still 13 gigs but your baseline is like 400 megabytes, because there's a lot less Soroban state compared to live state.
And then you have this DoS attack where, until you upgrade the network config settings, you essentially have no read or write fees for both in-memory and on-disk state. Someone could write tons and tons of temp entries and spam the ledger for essentially zero fees. Currently, there's an operational lag between upgrades, because core validators can only queue one upgrade at a time, so we'd have to get all of tier one to arm for the Protocol 23 upgrade and then, after that goes through, have them all arm for the network config setting upgrade, and in between that time you'd have free reads and free writes, which is a huge security risk. So what I'm proposing is that, because Protocol 23 is semantically changing + +[28:00] what this config setting means, the protocol upgrade itself should also change the value. This is slightly different implementation-wise from what we've done previously, but I think it should be a relatively straightforward implementation, where the Protocol 23 upgrade both semantically changes what the bucket list target size means and resets it to an initial starting value that's more reasonable given this new interpretation of the data. Okay, so we've actually updated settings on protocol upgrades before, so I think we know that works. Cool, okay, great. Let's see, a couple of other questions. Okay, so from OrbitLens: the storage for the hot archive? Yes, so the hot archive and the live bucket list are both part of consensus, so we need the + +[29:00] hash of that state. For that reason, both the live database and the hot archive database are BucketListDB implementations, and that's just because we have to Merkleize those structures, and BucketListDB is pretty fast these days. Now, with respect to offering tables in BucketListDB, we don't really have any plans to do that, and the reason is that it's a very difficult structure to add tables to. It's a log-structured merge tree, which is a variant of the kind of database used by RocksDB or LevelDB, and it's also completely made in-house; we didn't fork RocksDB or anything like that. It works very well for the query types that the validators require, and it's very efficient at those, but we have to essentially hand-write optimized C++ code for those specific queries, and so it would be both a very significant undertaking to allow arbitrary index types for downstream use, and it would also probably not be a very efficient database, just because it's a log-structured merge tree; a SQL-style index query would not work very well on it. For arbitrary key-value lookups we have exposed endpoints that are on the same scale as SQL queries, but again, they're just raw key-value stores; they're not indexes or really tables. There's been a lot of work done by the platform folks on the CDP and things like that, and given that the complexity of the Stellar Core database is increasing a lot, and that for a variety of reasons we only support BucketListDB now and no longer support SQL, I think any sort of raw database + +[31:00] access needs to move more in the direction of utilizing downstream systems, utilizing meta ingestion, using CDP, and not relying on direct access to Core's databases, just because nowadays, with BucketListDB, the core database is very specialized and is not suitable for generic queries. Cool, I think there are a couple of people typing, so I'll let them finish, or if anyone else has any other questions... if not, I have a third CAP that I'd like to introduce. I'll give it a second, and then we can + +[32:00] move on. All right, I feel like we've had enough time. Oh, an answer about slp1... would you mind linking that question again? I'm not quite sure what the slp1 question is. Oh yeah, the new limits, sorry. Yeah, cool. So I guess now I'd like to move on to [CAP-65](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md), the reusable module cache. Like I mentioned before, we were doing all this optimization work for in-memory state, and essentially, in addition to keeping all the contract data in memory, + +[33:00] we can also keep all the contract code, and by extension all the contract modules, in memory, because we have a way of archiving contract instances and contract code that hasn't paid rent recently. And with that, I think Graydon's on the call, if you want to come up and talk about [CAP-65](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md)... I don't think Graydon's on the call. I believe we were going to speak about CAP-65 next week, right? Oh sorry, I guess I gave you all a little teaser for next week, then. My apologies, jumped the gun a little. So + +[34:00] I don't want to steal Graydon's thunder, so I'll just leave you with a teaser: this not only helps optimize the read limits but also optimizes CPU utilization as well. But we'll talk about that more later. All right, unless there are any other questions, we can conclude this meeting. Thanks, Garand, it was a great talk. All right, thank you, and if you all have any more questions or concerns, there are a couple of discussion tabs on the CAPs, or just ping me on Discord + +
diff --git a/meetings/2025-01-30.mdx b/meetings/2025-01-30.mdx new file mode 100644 index 0000000000..73fda58368 --- /dev/null +++ b/meetings/2025-01-30.mdx @@ -0,0 +1,214 @@ +--- +title: "Wallet Adoption and Soroban Protocol Improvements for Memos and Execution Performance" +description: "This overview highlights Soroban smart contracts, Horizon API, and validator operations." +authors: + - carsten-jacobsen + - dmytro-kozhevin + - graydon-hoare + - leigh-mcculloch + - siddharth-suresh +tags: + - developer + - spotlight + - CAP-62 + - CAP-64 + - CAP-65 +--- + +import YouTube from "@site/src/components/YouTube"; + +## Telegram Wallet Onboarding Game Roadmap {#part-1} + + + +This session featured RampMeDaddy, a Telegram-native wallet project focused on onboarding non-crypto users through simple, familiar experiences. The team shared how they are using a lightweight game as an acquisition funnel before launching their full wallet, with the goal of reducing onboarding friction and building brand recognition ahead of main functionality. + +The discussion highlighted how Telegram’s global reach and viral mechanics can be leveraged to introduce users to Stellar-based payments and meme-coin trading, while deferring complex wallet concepts until users are already engaged. + +### Key Topics + +- RampMeDaddy overview: + - Telegram-based wallet for global payments and meme-coin trading + - Long-term vision as a multi-chain meme-coin wallet +- Adoption strategy: + - Launching a simple “price up/down” Bitcoin game as a teaser + - Points-based gameplay with leaderboards and referrals + - Partnerships with large Telegram gaming communities to drive early traffic +- Funnel design: + - Use the game to build familiarity and trust before promoting the wallet + - Gradual conversion from player → wallet user +- Product status and roadmap: + - Wallet in active development, targeting a closed beta in the near term + - Emphasis on usability for non-crypto-native users (avoiding seed phrase complexity) +- Branding and growth: + - Creative, low-cost branding tactics to maximize awareness + - Focus on organic, community-driven distribution rather than paid acquisition +
+ Video Transcript + +[00:00] Welcome everyone to this week's Stellar Developer Meeting. Today I have a very busy guest who fortunately made the time to come here today: it's Andre from RampMeDaddy, and I'm going to let him on the stage as well. Hello Andre. Hi Carsten, thanks for having me, great to be here, hi everyone. Yeah, great to have you here. You've been very busy. I met you in San Francisco on Monday, and it seems like a lot of things are going on. But can you maybe just start by introducing yourself and your team and what you're working on? Yeah, absolutely, happy to. My name is Andre, I'm the co-founder and CEO of RampMeDaddy. It was born at a Consensus hackathon hosted by the EasyA guys. Amazing time, it was a lot of fun, and we rode that wave + +[01:00] of fun for the past, whatever, seven months at this point. That has turned into a real company that's seeing real traction; we're getting a lot of positive feedback. RampMeDaddy right now is a Telegram digital wallet designed for global payments and for meme-coin trading. Meme coins have been super hot, and more chains are picking them up, so we're super excited about that. Ultimately the goal is to be a multi-chain wallet for meme coins. But when it comes to payments, that's what we started RampMeDaddy with at the core: sending global payments. We want to make it as easy as sending a text message, and Stellar has been extremely helpful with that. Great. So how far along are you in the process? I know you've been working hard on it for quite some time. But what + +[02:00] is the status today, how far along are you? Yeah, so today, actually, I'm doing this call out of the Draper University building. We are just wrapping up our Draper University accelerator. We did a three-week pre-accelerator back in August, which was an amazing time and had a big impact on what we're building; we got to meet a lot of amazing people. Actually, I think that's when we met for the first time and actually got to work together. We were also able to meet Denelle, we were able to pitch to Tim Draper, we were able to accomplish a lot of things in just three weeks. After that, I think we showed our strength, we got people interested in what we're building, and Draper + +[03:00] University and Stellar invited us back to the official accelerator. That's not 3 weeks, it's 12 weeks, and we have been just crushing it here with a team of 10 other companies building innovative products. The reason that we decided to go with Telegram in the first place is because Telegram has a billion users worldwide, and a lot of these users are, you know, there's a lot in the United States, but there's users from everywhere in the world. There's a number of groups that I'm an active participant in with people from, I think we did some calculations, about 40 countries. So I think it's amazing that Telegram brings it all together, and we figured, well, let's see how we can facilitate payments inside that. But before we get into payments, obviously we need to be able to + +[04:00] on-ramp onchain. So at the hackathon we built the on-ramping solution, and we have iterated a number of times. I don't want to say pivot,
because pivot usually sounds to me like you were doing something and now you're doing something else. I think we just refined and reiterated our idea to hit both functionality and revenue; those are two important pieces of any business, and I think we're finally at a point where they're put together and it makes sense. Okay, we were trying to get a screen share going, did you get it to work? Yeah, I think so. It would be fun to see it in action. Yeah, so I guess let me start. So RampMeDaddy is a Telegram wallet + +[05:00] that we're still building. I would say, and I think our CTO will not be super happy with me, but I'll say maybe four to six weeks is left. So we're going to roll it out, but I think there's a big strategy to how you roll things out, because you can spend all of this time, then roll out a product and then start driving traffic to that product when it's live. We decided to take a different approach: we decided to roll out a teaser game inside Telegram. It's a very simple game, you just guess if the Bitcoin price is going to go up or down. You tap on it, and 5 seconds later it tells you if you won or lost. If you won, you get 10 points; if you lost, points get deducted. That's the game in a nutshell. We have been seeing a lot of positive feedback on that game, + +[06:00] and it's amazing to see how, just walking around Draper University, I'd be passing by and see someone sitting on the couch tapping, playing the game. So that's the idea. We were able to secure a couple of partnerships with other Telegram games that have communities of millions of people. I think between all of the partners that we have right now, there's 50 million people in the communities for those games, and we have a very strategic partnership where they will drive traffic to our game, that will engage players with our game, and we get to tap into that 50-million-people community in those games. And then, after the players are happy and excited with the game and familiar with the brand, we'll start promoting the wallet to these clients, and we expect it to be a much simpler conversion to get people to use the wallet. So + +[07:00] there's a strategy for the game. It might seem like, okay, well, what does the game have to do with that? But we put a lot of thought into it, and did a lot of research and client feedback, before we built it. But yeah, happy to share, I just wanted to give some background so people are not confused about what's going on here. So let me try to share my Telegram window. Are you able to see my Telegram window? Yeah, awesome. So if you go to the @RampMeDaddy bot and you type this in, you can click Start and hit Launch, and we launch the game. Are you able to see the game? No, I don't see the game. Oh man, it's probably because it's a window that's shared, not the screen. Okay, let me try to share this screen. But you saw how we hit + +[08:00] the Launch button. Let's see, so after you hit the Launch button, full screen, let's see, I wonder if this is going to work. It's not going to look as good as it looks on the phone. Okay, are you able to see the game now? Yep, we see the game. Okay, sweet.
So obviously it looks a lot more organized on the phone screen, because it's just the screen size, but you can see the Bitcoin price changing here, and then I would guess that it's going up. Five seconds pass by, a loss. That's why I don't do a lot of day-to-day trading. But keep going, keep trying, and + +[09:00] win. So this is basically it in a nutshell. What we found to be very exciting for people is the leaderboard, and funny enough we have "a rot", I hope I'm saying it right, with his Stellar logo, which is super exciting. And what's even more exciting is that there's two people who play this game more than I do, and I feel like I refer so many people to this game. We have Trevor Hoffman, he shows it a lot, sends it to a lot of people, but these two guys have absolutely crushed it, and it's amazing to see this support from these people. So you get 10 points for guessing the price right, you get five points deducted for guessing it wrong, and you can also invite friends. This is a very important feature for us for the future of the product development, for the wallet itself: we allow you to earn points every time + +[10:00] you refer a friend, and if you click Invite Friends, it gives you a customized link that you can share with your friends. But other than that, this is the game. It's not a crazy amount of stuff that has been built, but it is an engaging, fun game that has been allowing us to attract clients, talk to clients to get their feedback, and see what they want in the market. I personally think it's a brilliant idea. It's a great way to get people excited, to get some awareness around what you're building and your brand. And something that I've always noticed (I'm actually surprised you're not wearing your RampMeDaddy cap today, you've got the T-shirt) is that you guys are always promoting your business, always. + +[11:00] I've personally experienced it. Like Monday, when we had the event in San Francisco, there were RampMeDaddy water bottles everywhere, and when you guys said that it was time to go out and get changed for the event, I was thinking, okay, maybe they're going to show up in suits today. But no, it was T-shirts with RampMeDaddy all over them. And I even heard a story from one of the other participants at Draper: there was something about you guys going out to eat at a restaurant, and I guess they were out of napkins or something, and then out of your bag you pull napkins with RampMeDaddy QR codes and logos on them. Yeah, we tried to get creative with the branding. I think, + +[12:00] when it comes to branding, it could be a very expensive and resource-consuming, really bottomless pit of branding and marketing money that you can put in, and we know that firsthand from our previous company that we were at, iTrust: an amazing brand, the strongest brand, and it's very important. iTrust was doing crypto IRAs, but unfortunately we don't have the budget of iTrust Capital, right? So we have been trying to come up with creative strategies to do brand awareness without spending a lot of money on it, and I think that's a strength that has to be present for a founder to be successful at scaling a retail-facing product.
So yeah, I completely agree. I sometimes have discussions with + +[13:00] other founders in the crypto and blockchain space, and sometimes they have a difficult time onboarding non-crypto people. I think introducing the game first, and creating some awareness and curiosity around your brand, is a great way to onboard people who are maybe not super into crypto or meme coins and get them excited about it. I think it's really going to be interesting to follow you guys, see you gain some traction, and get those people that may not be super familiar with coins or tokens or NFTs to treat it more like an everyday thing. I think you're doing a great job with that. So yeah, it'll be interesting to follow. We only have a few minutes left, but I'm super curious: what does your timeline look like + +[14:00] when it comes to the wallet and being able to use meme coins? So, the timeline for the wallet: I am personally hoping for about six weeks, and so far I think we've been very strong on delivering everything that we promise on time, so we're trying to stick to that history. But there's been no shortage of stuff to do while the product is not out, and I think establishing a strong presence and brand is definitely one of those things, which is what we've been working on very hard. As far as the wallet and the meme coins, we are going to launch in the next six weeks, most likely with a closed beta for some of the users to give us + +[15:00] feedback, and we've been moving towards that. Super exciting. Okay, I think that's all we have time for today, but I would love to do a follow-up, maybe when you hit testnet or mainnet, and see how it looks. So far it's been super exciting to follow your journey and see the hustle you put in to get people excited about it. I think that's something a lot of startups can learn a lot from. So thank you so much for joining today. Absolutely, thank you very much. Our goal is to drive adoption for crypto and blockchain, which is extremely hard to do if you have to worry about all the seed phrases, all of these crypto-native things. I already got my mom pretty involved in crypto: she + +[16:00] has crypto in her IRA, she has crypto wallets, she does all of that. My next goal is to get my grandma involved, and she's going to be a customer of RampMeDaddy and she'll be trading memes. So that's the goal and that's what we're working towards. That's amazing. Okay, thank you so much for joining today, and I'm looking forward to doing a follow-up
when you have more to share; I think that could be interesting to do. Okay, thank you so much for joining. Thanks for having me, thank you, bye. That was all we had time for today for the regular Stellar Developer Meeting. In about five minutes we'll shift over to Discord, where we have a protocol discussion today, and both this meeting and the protocol discussion meeting will be available as recordings later. So thank you for joining, everyone. Bye. + +
+ +## Memo Authorization and Faster Contract Execution via Module Caching {#part-2} + + + +In the protocol portion of the meeting, core contributors discussed two Core Advancement Proposals aimed at improving Soroban usability and performance. The focus was on enabling safer interaction with centralized exchanges via memo authorization, and significantly reducing execution costs by caching compiled contract modules. + +These proposals build on earlier Soroban architectural changes and are designed to be incremental, targeted improvements without disrupting existing contracts. + +### Key Topics + +- CAP-0065: Reusable Module Cache + - Caches compiled, ready-to-run contract modules in memory + - Eliminates repeated translation of WASM bytecode on every invocation + - Leverages live-state-in-memory assumptions introduced by CAP-62 + - Results in: + - Faster contract execution + - Lower CPU usage + - Reduced transaction costs + - Minimal protocol complexity; largely an internal optimization +- Execution behavior notes: + - Cache populated when contracts enter live state + - Edge cases discussed (e.g., upload-and-invoke in the same ledger) + - Worst-case behavior falls back to current costs without breaking correctness +- CAP-0064: Memo Authorization for Soroban + - Enables Soroban authorizations to explicitly sign transaction memos + - Addresses exchange deposit workflows that rely on memos for user attribution + - Prevents replay or misuse of Soroban authorizations with altered memos + - Maintains compatibility with existing exchange infrastructure +- Design tradeoffs discussed: + - Why memos are still relevant despite known limitations + - Why a narrow, surgical fix is preferred over a larger multiplexing redesign + - Acknowledgement that more general solutions (e.g., event-based attribution) may come later + +### Resources + +- [CAP-0064: Memo Authorization for Soroban](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md) — [Discussion](https://github.com/stellar/stellar-protocol/discussions/1610) +- [CAP-0065: Reusable Soroban Module Cache](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md) — [Discussion](https://github.com/stellar/stellar-protocol/discussions/1615) +- [CAP-0062: Partial State Archival](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) + +
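As a rough illustration of the CAP-64 mechanism summarized above: the memo becomes part of the payload that a Soroban address signs, so an authorization entry cannot be replayed under a different memo. The real signed payload is an XDR structure defined by the protocol and extended by the CAP; the TypeScript below is a hedged sketch with invented field names, using JSON hashing as a stand-in for XDR serialization.

```ts
import { createHash } from "node:crypto";

// Invented field names; the actual payload is an XDR structure defined
// by the protocol and extended by CAP-64.
interface AuthPayload {
  networkId: string;
  nonce: number;
  signatureExpirationLedger: number;
  invocation: string; // stand-in for the root invocation tree
  memo?: string;      // CAP-64: the memo the signer expects on the tx
}

// What the custom account actually signs.
function payloadHash(p: AuthPayload): Buffer {
  // JSON here stands in for canonical XDR serialization.
  return createHash("sha256").update(JSON.stringify(p)).digest();
}

// Validation-side rule (sketch): an auth entry that committed to a memo
// is only usable inside a transaction carrying exactly that memo, so the
// authorization cannot be replayed with an altered memo.
function memoMatches(signed: AuthPayload, txMemo?: string): boolean {
  if (signed.memo === undefined) return true; // signer did not bind a memo
  return signed.memo === txMemo;
}
```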
+ Video Transcript + +[00:00] All right, welcome everyone to today's protocol meeting, where we'll discuss two Core Advancement Proposals. Graydon will be speaking about [CAP-65](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md), which introduces a reusable module cache, something Garand actually alluded to last week, and after that, Dmytro will speak about [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md), which pertains to Soroban authorization and memos. With that, I'll hand it over to Graydon. So, this is probably the smallest CAP that I've done so far, and certainly the simplest to describe, but it does actually have a fairly dramatic effect. This really only makes sense in the context of [CAP-62](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md). Sorry, I'm doing [CAP-65](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md), which makes sense in the + +[01:00] context of [CAP-62](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md); sorry, getting all my 60s tangled up here. In 62, Garand pointed out that we're in a position where we can basically keep the live ledger state in memory constantly, because of the ability to evict live state to disk and to account for the movement of data between disk and memory explicitly, in protocol events that are surfaced to the user. So the user can tell what they're paying for, and what they're getting out of paying for things being in memory versus allowing an entry to expire and having it move to disk, and that obviously produces the ability to reduce IO costs. But there's another thing that we can do on that load or evict event, when things move from disk to memory or memory to disk, and it actually solves a problem for us that we have had for a long time, one that has been holding us back from doing something very obvious. It's a very + +[02:00] obvious optimization that we've wanted to do since the beginning of Soroban, which is to cache the compiled, ready-to-run version of any given smart contract in memory, in its ready-to-run form. The wasm bytecode that you upload is essentially not ready to run: it has been validated when you uploaded it, but theoretically it should be validated again before running, and it also gets translated into a sort of secondary internal format for execution. In the future, if we ever use a JIT, this will actually be an even more involved process, but even with an interpreter there's nonetheless a translation phase. The important thing to understand is that the translation phase is fast, but it still takes time, and you want to run your transactions as fast as possible. At the moment, if you look at an execution profile of a contract running today, when you invoke a contract your transaction actually spends a fair amount of its time, in many cases as much as half, or even two-thirds or three-quarters, of its time just doing the + +[03:00] translation, getting it ready to run. So we pull the bytecode off of disk, then we retranslate it, then jump into it and execute it, and then basically throw that away,
and then do it again for the next transaction, and the next transaction, over and over again. The reason is that we don't really have a clear way, if we were to cache it from one transaction to the next, of deciding who should pay for populating the cache, who should be able to basically free-ride on things already being in the cache, and how to reflect that in the fee model. There's a lot of complexity here, and I don't really want to go into all the different options. I'm sure you can think of a way of making it work off the top of your head; many people have. We talked about this a lot when we were developing the structure of Soroban, and we couldn't really come up with one that didn't have some weird incentive that allowed people to price other people out of the transaction queue in a way that was unfair, or that wound up making them overpay in certain circumstances in ways + +[04:00] that we wouldn't be able to refund, stuff like that. So ultimately, up until now, we have not done that, and for the first year or so of Soroban's existence it has executed in this fairly wasteful fashion, where it is reparsing and retranslating modules from their serialized wasm form into their ready-to-run form on every single transaction. That has been something we knew we wanted to get back to; we knew we wanted to figure out the correct way to deal with it longer term and have longer-lived caches of that translated form. And with [CAP-62](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) our opening became obvious. This is the clear opening where we can do it, and in fact we can do the most aggressive form of it, because there's this very large amount of data that will be kept in memory all the time, which is essentially the entire live Soroban part of the bucket list. So there are two levels of + +[05:00] filtering here, but if you step back: the contracts that are in the live portion of the ledger, collectively, are not very big; we've done some measurements. All told, it's 10 megabytes or something right now; it's not actually a ton of data. Even if we saw substantial growth in the blockchain, all anticipated scenarios don't show it exceeding memory, and [CAP-62](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) really makes it clear that there's a model for bounding that growth and for charging for it appropriately, so that as that growth occurs we can essentially keep pressure on it to keep it under control and not let it become arbitrarily huge. And that means the caching question just gets very simple: we just cache everything. As soon as something is pulled off of disk, we immediately compile and cache it; we translate it into its ready-to-execute form, stick it in a reusable cache, and reuse that cache from one transaction to the next. So all of the per-transaction + +[06:00] costs for translation go away, and that's the whole CAP. It's actually very straightforward in the context of 62. I've talked a lot to give you the background for it, but the actual CAP itself is utterly trivial; there's nothing to it. As soon as you turn it on, your costs just drop through the floor: everything just goes faster and it's cheaper.
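As a rough picture of the mechanism just described, assuming a cache keyed by the hash of the uploaded wasm (this is illustrative TypeScript, not stellar-core's implementation):

```ts
// Illustrative sketch of a reusable module cache.
type WasmHash = string;
interface CompiledModule { run: () => void } // the ready-to-run form

// Stand-in for the slow step: validating wasm and lowering it to an
// executable representation (even an interpreter has this phase).
function translate(wasm: Uint8Array): CompiledModule {
  return { run: () => void wasm.length };
}

class ModuleCache {
  private modules = new Map<WasmHash, CompiledModule>();

  // Populate when contract code enters (or is restored into) live state;
  // under CAP-62 that live state is assumed to fit in memory.
  onCodeLive(hash: WasmHash, wasm: Uint8Array): void {
    if (!this.modules.has(hash)) this.modules.set(hash, translate(wasm));
  }

  // Drop when the code is evicted to the hot archive.
  onCodeEvicted(hash: WasmHash): void {
    this.modules.delete(hash);
  }

  // Invocation path: a cache hit skips per-transaction retranslation.
  // The worst case (e.g. upload-and-invoke in the same ledger, before
  // the cache is repopulated) falls back to translating on the fly,
  // which is simply today's cost.
  getOrTranslate(hash: WasmHash, wasm: Uint8Array): CompiledModule {
    return this.modules.get(hash) ?? translate(wasm);
  }
}
```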
So, a pretty easy sell. I don't think there's an awful lot of reason to object to it, unless you can think of a reason why you wouldn't want to avoid doing work multiple times; it's just a freebie. We really should have done this earlier on; it's just that we didn't know how to make it work, and now we have a fairly clear model for making it work. I've already implemented this. I've run the experiments on mainnet, running it in parallel on a local development validator, and it's much cheaper; there are substantial cost savings. There are a couple of interesting corner cases to deal with, in terms of: if you upload a contract and then try to run it + +[07:00] inside the ledger that you uploaded it in. If you do an upload and run immediately, we have to be a little bit careful there, in terms of when the cache gets repopulated, or incrementally populated with new additions. But the worst case scenario of all of those is just that you get charged what you get charged today, that is, the cost of translating it on the fly. As for the cost of cache population and eviction, we're just going to eat those costs entirely; we're not even going to include them, because they're so much lower than the IO cost of even bringing the stuff into memory in the first place. There's a little bit of careful accounting to make sure it all works, but essentially the user experience is just that everything goes faster. So it should be very straightforward. If anyone has questions I'm happy to answer them, but I would be surprised, because there's really not much to this CAP except cache work. And + +[08:00] yeah, so regarding the edge cases, can we maybe specify them explicitly in the CAP? Because they're not quite specified: my current understanding from looking at the implementation is that we compile contracts specifically when we add entries to the bucket list, specifically after running each and every transaction in the ledger. There's also a weird case where, if you have an upload transaction and a transaction that invokes this contract in the same ledger, for whatever reason, then we basically have an invocation of a contract whose code is not yet cached, so it won't result in a cache hit. That's right, yeah. I + +[09:00] think it is something that we probably need to specify more explicitly in the CAP. With respect to the why: it's just that in this particular case I don't think you can ever get the correct simulation result. Again, I don't know why you would do that, but simulation will probably assume that, if you do not upload any contracts, then every contract you use has been cached, and this is the one edge-case scenario where it actually is not cached and not in memory. So basically your fees and resources will be off, and I guess it's worth pointing that out in the CAP. Yeah, I can clarify the conditions. I think you're right that it's a fairly edge case, because you would have to be simulating an execution of a contract that wasn't present on the chain, on the assumption that maybe someone will upload it in the same transaction set as you. Yeah, + +[10:00] I don't know why anyone would do that,
but the protocol kind of has to handle this anyway. Sure, yeah, and it does; it falls back gracefully. It's just going to charge you more than you expected, and then your transaction would fail. Okay. I'll document that in the CAP. Anyone else? Okay, that was an easy one, so I guess we move on to the next one. Yeah, it doesn't look like there are any questions, so Dmytro, if you want to get started. Okay, yeah. So, CAP-64. The CAP itself is actually not that hard, specification- and + +[11:00] implementation-wise, but I guess the discussion is more around the requirements, what we are trying to solve here. For some background: memos are an element of every Stellar transaction that is almost non-observable by the protocol. The only thing the protocol does with the memo is that it gets signed as a part of the transaction envelope, but otherwise the memo is just some arbitrary payload that the protocol has no concern thinking about. And it would be fine if it was just an arbitrary thing that has no value attached to it, maybe for marking transactions or something. But it happens that exchanges use memos for multiplexing, even though + +[12:00] Stellar currently has so-called muxed accounts, which allow a single public key to have a huge number of different sub-accounts that are not represented on chain but can be marked by some integer identifier. For historical reasons, most of the exchanges are just using the transaction memos, and the way it works is that when you want to deposit tokens from your own wallet to an exchange account, the exchange gives you two values to use when making the transaction. The first value is the destination account, which is a normal Stellar account, and the second value is the memo, usually some integer that is assigned to your account on that exchange. Basically, in this way an exchange can maintain only a few public keys; they + +[13:00] don't need to create a new account for every user. And for Stellar it's actually very meaningful, because on Stellar there are no stateless accounts: an account is not just a public key but literally a ledger entry, so you need to create it first. So it makes perfect sense that exchanges just run this off-chain multiplexing to reduce the overhead of maintaining large sets of users. That's the historical background: exchanges do use memos, for better or worse. Muxed accounts are the more modern way to do that, but they're not actually being used that much. Then, when Soroban came out, it didn't care about memos, for the reason mentioned before: from the + +[14:00] protocol perspective, on paper, memos do nothing. And this actually spawned kind of a vulnerability if anyone wanted to use memos in the context of exchanges with Soroban, and I think there is a post in the security advisories regarding this issue. I probably will not talk much about it, because it's not super relevant anymore: it has been fixed and no one has been affected. But the gist of it is that it is not possible for a custom account to sign for the transaction memo. What this means is that, for example, imagine you wanted to deposit money to the exchange from your custom +
So the account and a memo and yeah. When when you're making a sban transfer there is simply no way to sign for the memo and in the context of vulnerabilities, that means, that well. If you were to attach your off for the transfer to some to transaction with the memo the store transaction by definition is signed by some classic account. Because well your transfer is a thrand transfer from a custom account and unfortunately as of today it cannot sign for the whole transaction. And then anyone could + +[16:00] take your San off for the transfer and attach it to a different transaction with a different member it is obviously vulnerability but. If you think about it like it is kind of unlikely, that this situation would come up in the first place. Because well as I mentioned like you literally cannot make a stun transfer is a memo. So like the whole preconditions to this situation I kind of you're like you need have two account both of each must be fasted and only one of them is set in memo other one is not set in memo so, that's kind of the state of the things and the way like the protocol deals with the. If you potentially shooting yourself in direct by us memers with San transactions in the exchange context, that car simply does not allow such transactions to be present in The + +[17:00] Ledger they rejected before entering the mle. But we still want to have some solution for interacting with the exchanges using the custom account and C 64 is about like the most straightforward kind of solution there is hopefully the least necessary amount of work for exp me to support. Because it uses the old ways of using memos it just makes them usable from the San point and the way it makes meem usable from the San point is very straightforward you just find as a part of your San pres payload you may set some memo, that you expect to be present in the transaction and this signs for it explicitly. When you are + +[18:00] building your signature P it's basically makes it. So you actually. Now can Cree the suran transfer operation from customer account to some arbitrary exchange the account and your signature will also be a signature of your memo. And then this us payot will only be valid. If it is a part of transaction, that as a match in memo. So well you still need to have two accounts. So need to kind of trust there is no way to actually prevent the prevent there is no way to take your o, that you sign foran and attach it to a transaction with a different memo and there is basically no way to make the transfer not go where it was supposed to go and obviously the next step would + +[19:00] be like. If you were to implement it I must make it clear like even. If you were to use it like there is no guarantee, that exchanges will support it. But at least in order to support, that they just need to increase the scope of the transactions, that are, that they monitoring like. Because I would imagine like on a high level okay don't have access to your code obviously. But on a high level what they would do is they would look at the intern in of transactions somehow identify the payment. And then look at the payment transaction and identify the memo is this change they need to also look at the Rob on transer operations again mattera should look the same in terms of like having the balance of the account increased. Then is in exactly the same place of the + +[20:00] transaction as it for classic. So this is a CAP the payment operations to C address it is like a an opposite basically. 
when you think of interaction with exchanges, there are two operations you want to support: deposits and withdrawals. Deposit means I have my wallet and I want to send money to the exchange, and then do some trading on the exchange. Withdrawal means I have some token balance on my custodial exchange account and I want to move it back to the crypto wallet that I own. Payments to C-addresses are actually about supporting the withdrawal side, so it is related, but it's not in scope of this CAP, and + +[21:00] I'm not sure if we're doing it for this protocol or at all; the current CAP is specifically about the deposits. So this is basically what is proposed, and there are obviously alternatives, such as: hey, why not use muxed accounts? As I mentioned, exchanges don't support them anyway. Or: hey, why not come up with a solution for generic Soroban multiplexing? Again, exchanges don't use that yet, it is more complex, it probably requires much more design work, and there is no very obvious way of how to do multiplexing with Soroban. So what this CAP does is basically fix an immediate issue we have, but it + +[22:00] does not close the door on future modifications or on coming up with new ways of doing multiplexed accounts and so on. This one seems like the best thing we could do, in that it probably has the biggest overlap with the existing infrastructure and approaches that are out there; other solutions would probably be much more divergent from what happens now, which would slow down adoption. Okay, I think that's it for the overview. Now, to this question: yes, memos and events are currently not specified together; the existing CAP does not + +[23:00] talk about the events, which makes sense, since it is about memos. But the CAP that we are going to discuss next week, which is also under active discussion, is about trying to generalize the events between classic and Soroban, so that metadata for classic operations comes in a format where you don't need to write custom parsing logic for classic, and memos may be a part of this event stream in some cases. But I don't know if we want to discuss this now or not. Yeah, and in general, I guess the main philosophical + +[24:00] question about this CAP is whether we want to support memos at all, because they're kind of bad and people have issues with them, and I think there is some merit to that, just because of the downsides. If other folks disagree, maybe speak up in the chat, + +[25:00] or maybe someone wants to come up on the stage. As was said, the CAP still keeps memos in the system. Yes; basically, I would say my opinion on this is that memos do exist and they are in use for now, for better or worse, and it makes sense for Soroban to support them for the + +[26:00] time being. If we want a better generic solution for that, we should probably try to come up with it separately, and my expectation is that it will take a long time before anything like that gets adopted. If anyone wants to speak up, maybe do so now, though everyone is typing. + +[27:00] Sounds like there's more to discuss related to the memos, but it shouldn't affect [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md), right? Yeah, I feel like that discussion can be continued asynchronously. But before I move on, I just want to ask everyone again:
if anyone has any strong opinions on why we shouldn't do [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md), go now. Yeah, great. As I understand it, [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md) is very narrow: + +[28:00] the use case is hyper-focused on addressing the existing classic use case, and it's really surgical, just addressing the problem that is there. And I understand the point being brought up, you know, why not solve this in a way that works for more than that, and I get that. I guess my point is that there are advantages both ways. Obviously, if we can solve this in a more generic way, that would be pretty cool, and we probably can. But there are also some advantages if we do solve it really surgically, because we can sort of work back to the transaction memo being something that you can rely on, and + +[29:00] I think that's valuable. But maybe that's not valuable, so I'm not actually sure if you're saying it's not as valuable as I, or some others of us, think it is. If we do think that's valuable: is there anything about this proposal that prevents us from loosening this constraint in the future? Like, if we go ahead with this proposal, the network adopts it, and transaction memos become something exchanges can continue to rely on, can we, a year from now or six months from now, loosen the requirements so that every auth can have its own memo or something like that? Or are we going down a path where it's going to be really difficult to walk back? Right, so there are several things here. Let me start with: why don't we drop the requirement for the signed memo to match the transaction memo? I don't think it's a good idea, + +[30:00] because with the transaction memo, when you observe a successful transaction, you can say without doing any additional work that all the parties have signed this memo, and in the case of CAP-64 you can also say that the relevant Soroban auth parties have signed for this memo as well. If you have a transaction that has some Soroban auth, and there is no requirement for the memos to match the transaction memo, then when you're looking at the transaction you cannot tell anything about the nature of this payload. Besides a minor space optimization, I think the main context this comes up in is: hey, why don't we use it to allow something like payments to multiple exchange destinations at + +[31:00] once, in the same transaction? Well, the issue is that, unlike the case of transaction memos, you don't actually have any confidence, for any given auth, that it has been used and that the memo has actually made the payment go through, and so on. So basically you put a lot more demand on the downstream systems to tie all those pieces of data together: not only do they need to jump from the event to the transaction, they also need to make sure that there are events for every auth present in the transaction, that everything matches, and that there's nothing extra, no extra payload or something. And I think it's really a footgun: if + +[32:00] someone forgets about this check, they can be tricked into thinking that something has been transferred to them, by someone attaching an extra auth entry with some extra memo, maybe with their address as the destination account of some method in there. So basically, given that Soroban auth is really flexible,
because it really has to support custom accounts, and you do not verify it beforehand; you can only verify during execution that it has actually been consumed. So I don't think it's a good idea to just put a memo in there without any tie to the actual transaction. Here, basically, the fact that it's the transaction memo ensures that there are no ambiguities in terms of + +[33:00] which memos are actually being signed. I recognize it's really restrictive, but I think it's also for the better, for now. As for a more general solution, I think it should have something to do with the events, so that you don't need to do any work downstream trying to figure out whether this is the right memo, the right multiplexing ID, for this particular event. Instead, the event should explicitly tell you the muxed ID that corresponds to that particular event, and I think this is why this whole discussion about the events has turned up: if you were to come up with a solution that is event-based, then it would probably make sense to also include the current memos in the events, to keep the schema consistent across all the different types of operations. Okay, + +[34:00] now I need to read the comments again. Yeah, to the question of whether we can relax this requirement: given the security implications, I'm not sure it is a good idea. But, as I just said, I'm not sure; I think you need a bit of a principled approach, unless of course we figure out how to tie the memo from the auth to the event, and I'm not sure that is possible at all. + +[35:00] As for the question about whether centralized exchanges would need to make some code changes either way: without that memo change, basically, some sort of memo support is a requirement to support deposits. It's not the only thing to do, but it is a requirement, because if they cannot do multiplexing, I think it is very unfair to ask them to create an actual physical account for every user, and it's not scalable for the network either. So basically, if you don't propose any multiplexing solution at all, I + +[36:00] think there is no way to ask exchanges to implement anything; we need at least something. And the argument for this CAP is that this flow is very close to what they are already doing, whereas if you try to come up with some other, more general system, adoption is also contingent on them adopting that whole system, whatever it is, and I don't know yet what it is. + +[37:00] To give some context: there may have been some discussion here on Discord about supporting muxed accounts + +[38:00] in Soroban in general, and the trickiest thing about any sort of multiplexing in Soroban, the reason why we are not coming out with some more generic multiplexing CAP yet, is that, since in Soroban you kind of manage your own storage, it is pretty hard to distinguish between the cases where you do care about multiplexing and where you don't. For example, consider a classic muxed account: let's say you have a transfer, and let's say we just add muxed support to the Soroban address type. When you store the balance for a multiplexed account, for example in your custom token, you specifically want all the multiplexed sub-accounts to map to the same + +[39:00] key. But when you emit an event, you specifically want the event to contain the multiplexed account destination, and
when you're verifying auth, you also want to make sure that the transfer happens to the exact multiplexed account that has been specified in the transaction. And I'm not going to show you a generic way for the contract developer to be aware of when they should or shouldn't treat accounts as multiplexed; this is the primary reason why muxed accounts are not supported by Soroban at all today, and also a reason why there's no great solution for multiplexing in Soroban yet. Maybe there could be some function that the contract developer implements, + +[40:00] but again, I'm not sure yet how that would work, and it's another argument for why we are doing something simpler. Right. I think, do we have any other topics that need to be discussed now? Because I guess we can continue the discussion asynchronously. Yeah, I'll leave a link to the discussion in + +[41:00] the chat and we can continue there, but it doesn't sound like there are any more immediate questions for [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md) at the moment. Yeah, I've expressed my opinion, and we can continue discussing in that thread. Sounds like that's it for today. So thanks, Graydon and Dima, for presenting, and thanks everyone for listening. We'll see you next week, unless Leigh or George have any questions. They're typing... + +[42:00] All right, that's it then. See you next week. + +
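For a concrete picture of the exchange-side story discussed above, here is a hedged TypeScript sketch of memo-based deposit attribution that treats classic payments and Soroban transfers uniformly. Every shape and name is invented for illustration; a real integration would consume Horizon or CDP ingestion output rather than these toy types.

```ts
// Invented shapes for illustration only.
interface DepositEvent {
  kind: "classic-payment" | "soroban-transfer";
  destination: string; // the exchange's pooled deposit account
  asset: string;
  amount: bigint;
}
interface ObservedTx {
  memo?: string; // under CAP-64, Soroban auths have signed this memo too
  events: DepositEvent[];
}

const DEPOSIT_ACCOUNT = "G...EXCHANGE"; // hypothetical pooled account
const memoToUser = new Map<string, string>([["12345", "user-42"]]);

function creditDeposits(
  tx: ObservedTx,
  credit: (user: string, e: DepositEvent) => void,
): void {
  const user = tx.memo !== undefined ? memoToUser.get(tx.memo) : undefined;
  if (!user) return; // unattributable deposits are typically held or refunded
  for (const e of tx.events) {
    // Same matching rule for both kinds: the only new work for an exchange
    // is also watching Soroban transfer events, not just classic payments.
    if (e.destination === DEPOSIT_ACCOUNT) credit(user, e);
  }
}
```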
diff --git a/meetings/2025-02-06.mdx b/meetings/2025-02-06.mdx new file mode 100644 index 0000000000..420375c095 --- /dev/null +++ b/meetings/2025-02-06.mdx @@ -0,0 +1,300 @@ +--- +title: "Decentralized Finance Infrastructure and Protocol Scalability Improvements" +description: "This overview highlights Soroban smart contracts, Horizon API, and Soroban RPC." +authors: + - carsten-jacobsen + - dmytro-kozhevin + - george-kudrayvtsev + - leigh-mcculloch + - orbitlens + - siddharth-suresh +tags: + - developer + - CAP-63 + - CAP-64 + - CAP-67 + - SEP-40 + - SEP-41 +--- + +import YouTube from "@site/src/components/YouTube"; + +## Decentralized Price Feeds and Multisig Transaction Coordination Tools {#part-1} + + + +This session focused on core DeFi infrastructure being built on Stellar, with presentations from the teams behind Reflector and Refractor. The discussion centered on providing secure, decentralized price feeds for Soroban-based DeFi protocols and simplifying multisig transaction coordination for DAOs, treasuries, and shared accounts. + +Speakers walked through real-world use cases, architectural decisions, and live interfaces, emphasizing how these tools reduce risk (oracle manipulation, operational errors) while improving developer ergonomics and composability across the Stellar ecosystem. + +### Key Topics + +- Reflector decentralized oracle overview: + - On-chain price feeds for Soroban DeFi use cases (lending, derivatives, stablecoins, asset management) + - Designed to mitigate oracle manipulation, a common DeFi attack vector +- Oracle architecture: + - Governed by a DAO of reputable Stellar ecosystem participants + - Price aggregation via a quorum of independent nodes + - Updates written on-chain at fixed intervals (e.g., ~5 minutes) +- Standards and integration: + - SEP-40–compatible oracle interface + - Oracle data stored directly in Soroban contracts for efficient access + - Optimized to reduce contract calls and execution costs +- Free vs subscription model: + - On-chain price feeds are free for all developers + - Subscription service offers: + - Higher-frequency sampling (up to ~1 minute) + - Custom triggers based on price deviation thresholds + - Webhook notifications with cryptographic proofs +- Reflector token and governance: + - Token incentives for oracle node operators + - Used for DAO governance (adding feeds, members) + - Required for paid subscription creation and upkeep +- Roadmap highlights: + - Additional price feeds (FX rates, commodities) + - Improved documentation and developer tooling + - Cross-chain expansion of the subscription service +- Refractor multisig coordination tool: + - Pending transaction storage and signature aggregation + - Designed to simplify multisig workflows and avoid coordination errors + - Detects required signers, thresholds, and validity automatically + - Supports multiple wallets and offline/air-gapped signing + - Open source, free to use, with a public API for integration +- Refractor use cases: + - DAO operations and governance + - Treasury management + - Custodial or shared service accounts + - Safer review of transaction intent before signing + +### Resources + +- [**Reflector**](https://reflector.network) — [Meridian presentation](https://youtu.be/v=KFRF12c-RSU) +- [**Refractor**](https://refractor.space) — [Blog post](https://stellar.expert/blog/multisig-aggregation-with-refractor) +- [SEP-0040: Oracle Interface](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0040.md) +- [SEP-0041: Token Interface](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0041.md) + +
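For orientation, the SEP-40 oracle interface mentioned in the notes above transliterates to roughly the following TypeScript shape; the normative definition is the Soroban (Rust) interface in SEP-0040, and the staleness guard shown is a common consumer pattern rather than part of the standard.

```ts
// Rough TypeScript rendering of the SEP-40 surface (see SEP-0040 for
// the authoritative Soroban interface).
interface PriceData {
  price: bigint;     // fixed-point value, scaled by 10^decimals
  timestamp: number; // unix time of the sample, in seconds
}

interface Sep40Oracle {
  decimals(): number;   // precision of all reported prices
  resolution(): number; // sampling interval, in seconds
  lastprice(asset: string): PriceData | undefined;
  price(asset: string, timestamp: number): PriceData | undefined;
}

// Common consumer guard (not part of the SEP): refuse quotes older than
// a couple of sampling periods; the exact staleness bound is the
// consumer's own risk decision.
function freshPrice(
  oracle: Sep40Oracle,
  asset: string,
  nowSecs: number,
): PriceData | undefined {
  const p = oracle.lastprice(asset);
  return p && nowSecs - p.timestamp <= 2 * oracle.resolution()
    ? p
    : undefined;
}
```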
+ Video Transcript + +[00:00] Okay, hello everyone, welcome to this week's Stellar Developer Meeting. Today I have a couple of guests, so let me bring them onto the stage. First of all, thank you for joining; please go ahead and introduce yourselves. I think you are well known in the community, at least from your names and your projects, but please. Thank you for inviting us. People know me as OrbitLens; I am the lead of the team behind StellarExpert, also Reflector and some other ecosystem projects, and today we are here with Raph, the one and the only Raph. Yeah, so I'm Raph, and I've been doing some business development work with Reflector for a bit, helping with + +[01:00] some outreach, finding some devs that would be a good fit, and this works really well with our other project; I do marketing for Stellar projects through Lumen Loop. So I'm just kind of a big outreach guy, and if you want to know what's going on in the community, come to me. But it's just really great to be here and be able to talk about two really good products. Kind of jumping into it a little bit: we're going to be talking about Reflector and Refractor, both things that are incredibly useful for devs looking to build in a decentralized way on Stellar. Great, yeah, take it away, I'm looking forward to this presentation. [Laughter] So, Reflector is a decentralized price feed oracle for Stellar DeFi smart contracts, and it supports financial derivatives, lending and borrowing protocols, algorithmic stablecoins, asset management contracts; anything that you want to do with price feeds, you can do it. + +[02:00] So why are oracles important? Price manipulation is probably one of the most common vectors of attack when you're working on DeFi; almost every single week we hear about successful exploits. So the rule of thumb is: when you write DeFi contracts, you write them on top of oracles, and you've got to make sure that you're choosing these price feeds wisely. Again, you're only as good as your oracle when you're sourcing your data from somewhere else. And the really cool thing about Reflector is that it's built on some known brands in the ecosystem; it's a decentralized consensus where they're agreeing on these price feeds. It's controlled by a multisig of reputable organizations on Stellar, and that really helps guarantee reliability and fault tolerance, making sure that those price feeds are updated and secure. Reflector relies on that quorum of nodes, and you can see it in that picture on the slide there; it's + +[03:00] connected via Stellar Core validators. Next slide there. So we combined two different oracle approaches: on-chain feeds and subscriptions. You can think of it as: we just have the price feeds, which are free, and then subscriptions, which are kind of a pro level of these price feeds. What's really cool about Reflector is that at its base it's free: all that data, anybody can plug into it, you don't really need to pay for it. But if you do, there are some really cool things that you can set up for yourself, some automations and some communications, some notifications, that could be useful when you're developing. So, on-chain price feeds are free for everyone; the interface follows the SEP-40 compatibility standard; all the price feed data is stored in the contract itself at a fixed time frame, and I think the free interval is every five minutes.
So the data is readily available for all consumer contracts, and you use oracle-quoted price results directly in + +[04:00] the same function. It's efficient, it's optimized, low fees, and it greatly reduces the number of contract calls. So again, we're trying to make sure that it's efficient for your DeFi apps to plug into. Off-chain data, or classic data, subscriptions, on the other hand, provide more fine-grained data access, with user-defined, customizable triggers and up to one-minute sampling, which unlocks a lot of use cases. This service provides access to a much wider range of available price feeds, private webhook invocations in combination with on-chain execution proofs, and, again, it guarantees stability and security. So, highlights: the oracle contract provides a comprehensible interface, again per SEP-40, the ecosystem standard, and on top of that it offers automatic cross-price calculations, utility functions, TWAP range approximations. Last but + +[05:00] not least, on-chain data, like we said, is completely free, so go ahead and use it. I feel like doing outreach here is kind of a weird thing, because we're trying to sell devs on a free product: go use it, it's there. We hope this way more Soroban contracts can build safe and reliable infrastructure, the main goal being that we want you to use a system that's safe. That's why these nodes that we work with are incredibly important: these node providers are securing everyone's DeFi apps. So they're of incredible importance in making sure that the consensus mechanism and the decentralized nature of the oracle serve those DeFi apps. Okay, and that brings us into the DAO. So it's a cluster governed by + +[06:00] these organizations; they serve and maintain these Reflector server nodes and they participate in the cluster consensus. When you want to get an asset listed, this price feed listed, you have to make a proposal to the DAO and submit it with the tokens. Same with adding different price sources: again, you've got to go to the DAO, and the DAO makes the decisions. None of it is done by one entity, everyone needs to agree on it, and that helps ensure that all these organizations are key players in making decisions. And that brings us to the token. Again, the token is what makes you able to participate in the DAO. Cluster operators actually receive tokens for participating in the consensus mechanisms, as they provide computational resources to aggregate, validate, and certify, so it makes sense to pay them in tokens + +[07:00] correspondingly: the accrued tokens represent the equivalent of the computational resources contributed by each party. These tokens are then used by the cluster for governance and subscription services. So the XRF tokens can be used in cluster governance, voting on new price feeds or new cluster member invitations, and they again serve as the backbone for the subscriptions: they're required to create custom subscriptions and then pay for the upkeep of those subscriptions. Yeah, and here we have a few prerecorded videos with the interfaces,
Because we have a lot of them, I think I will start with the node admin dashboard — the interface that displays the current configuration of the cluster nodes. There are public keys and addresses

[08:00] for each server, and below you can see stats for each node. Other sections contain the list of tracked assets for every oracle contract, the configuration change history, logs, etc. The live price feed produced by the oracle is available on the ledger and on our website, reflector.network. Here you can see the update history for every oracle supported by the cluster, for both pubnet and testnet. Ledger updates are stored on the ledger itself at regular five-minute intervals and are immediately available after that to all consumer contracts that utilize our price feeds. And the next one is the

[09:00] subscriptions interface — the more advanced solution, which is kind of a Swiss army knife for any developer who decides to build any kind of oracle-based solution on Stellar or some other chains. Subscriptions is a service for user-defined triggers that are invoked automatically once the price deviation reaches a certain threshold. Every oracle node independently evaluates the subscription condition once a minute, and consumers receive webhook notifications on triggered price changes. Let's see how it works in the web interface. First we need to select the data source for the base and quote symbols; the quoted value is calculated as the quote price divided

[10:00] by the base price. There are two options: first, aggregated prices from exchanges, and second, prices of tokens from the Stellar public exchange. For instance, here we select the Arbitrum token from exchanges. Now, for the base ticker we can utilize the same data source, or quote prices for a pair from two different sources — this provides the ability to build a personalized oracle on assets from different feeds and different asset classes, and the cluster will calculate the prices automatically. Here we select yXLM. The trigger threshold here controls the target price deviation amplitude. In this example it means that the webhook endpoint will be notified every time the quoted price goes up or down by

[11:00] 2% compared to the previously dispatched value. Reflector also periodically sends heartbeat messages with the current quote price to let the upstream service know that the subscription is still active. To activate the subscription we need to pay for it with XRF tokens, and they are also required to pay for the upkeep — the balance gets charged on a daily basis. It is worth noting that the daily fee depends on the quote complexity and the heartbeat interval. Here we can use a webhook demo website to show how it works: we just copy-paste the webhook URL, and that's basically it. The account that created the subscription will become its owner. We need to confirm the transaction, and it will

[12:00] automatically create the subscription.
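On the receiving end, a webhook consumer can stay very small. A sketch under stated assumptions: the payload field names below (`subscription`, `price`, `timestamp`, `signature`) are illustrative stand-ins for whatever schema Reflector actually documents; the dedup logic reflects the point made in this walkthrough that every cluster node delivers the same update with its own signature.

```ts
import express from "express";

const app = express();
app.use(express.json());

// Every Reflector node POSTs the same notification with a distinct signature,
// so deduplicate by (subscription, timestamp) and react only once.
const seen = new Set<string>();

app.post("/reflector-hook", (req, res) => {
  const { subscription, price, timestamp, signature } = req.body; // assumed shape
  const key = `${subscription}:${timestamp}`;
  if (!seen.has(key)) {
    seen.add(key);
    // React once per price update, e.g. rebalance a position or raise an alert.
    console.log(`subscription ${subscription} quoted ${price} at ${timestamp}`);
  }
  // TODO: verify `signature` against the known cluster node public keys.
  res.sendStatus(200);
});

app.listen(8080);
```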
So basically all the setup — everything — is done in the web interface. Here on the subscription page we can see all the key parameters for each of the account's subscriptions, as well as the remaining upkeep balance. Cancelling a subscription removes it and reclaims the remaining tokens. And here on this interface you can see the notifications that come from the cluster. There were a lot of seemingly identical POST requests, yet they have different signatures: each Reflector cluster node sends notifications to consumers to provide redundant and

[13:00] reliable proof of delivery. As we can see here, every incoming request contains information about the subscription, the quoted price, and the update timestamp. So, what's next? We're actively developing the protocol, and at the moment we're working on adding new price feeds for foreign exchange rates and commodities. Website improvements are another important task for us — we need some more code samples, better documentation, and developer support to make the integration for newcomers as smooth as possible, because sometimes Raph needs to go there and back again to explain something, and we basically need to explain some very basic things about

[14:00] oracles and our particular oracle model. And of course the next huge milestone for us is going cross-chain: we want to expand our subscription service beyond Stellar, while the quorum and DAO smart contract logic keep working on the Stellar network. Essentially, the subscription service is chain-agnostic — any service, on any chain or even without any blockchain, can receive these price notifications; they just need to set up their subscription via the webhook interface. That's basically it about Reflector. We definitely want you to come to our website, join our Discord,

[15:00] talk to devs and other people, and check what you can do with oracles, because Reflector is the ultimate price reference for Stellar DeFi. — I think one of the big things too is that you can see it out in the ecosystem already — you can see it in Blend, and there are really cool protocols starting to build on Reflector. So again, go see what's out there; if you see something you like, maybe you'll want to play around with Reflector, and once you're ready, check out the Discord or reach out to one of us and we can point you in the right place. — Yeah, totally. We can answer some questions now if you have any. — So, anyone that has any questions, feel free to ask. I saw someone asking about the QR code at the end of the presentation — that's great — but if anyone else has

[16:00] questions, we still have a few minutes. But I would like to ask a couple of questions. I'm thinking: what made you start building this? Because this is not just a small price feed oracle, this is pretty comprehensive — everything from the security to having it managed by a DAO. How did you get started working on this, and how did the project start? — Well, we had a conversation at Meridian about two years ago, and it was like, "what do you want to build on Soroban?" Well, we have StellarExpert, we are experts in data processing, so an oracle seemed like an interesting thing to build. Although, well, right now maybe I'd reconsider

[17:00] that decision, because it was a pretty bumpy period. And I am super grateful to all our DAO members who joined us in this challenge and supported us.
Because they are actually the ones who run the nodes and protect the protocols on chain. Currently we have more than 15 million dollars protected by our oracles, and it's the de facto oracle solution for the Stellar network. I am grateful to everyone who supported us in this. — Great. I think we have a couple of questions — Matt has a really good one about the

[18:00] hackathon. — Yeah, that's a pretty good one. So we've had two dev competitions to date, just to help us highlight how the oracles can be used and get devs playing with it. For best oracle contracts we had Lena, which is a trustless loan platform, that won one of the prizes; we had Pula Labs, that won with a trusted portfolio manager that helps rebalance your portfolio automatically, which is kind of cool; and then we had Kalepail, who did Reflector Predict — betting on prices through an oracle. So those were the three winners. We also had a bunch of walkthroughs and text write-ups, and some people built Reflector clients, boilerplates, notification bots — there's just a lot of stuff there. If you're interested in Reflector and seeing how it can be used, going to the Discord and looking at the submissions

[19:00] section is a really cool list of different examples. — Great, yeah. I've been to a couple of hackathons now and I've seen Reflector being used — you mentioned Kalepail, and I know we use it in general at SDF when we build cool, fun stuff, when we experiment. But I have seen that both Slender and Blend have used it too. Is that something you are driving, something you are seeking out, or did they come to you and say this looks like a really neat service? — Well, it started with that first meeting, when Soroban was still in beta and not even on mainnet yet. So we actually partnered with some key players at the

[20:00] time. — Okay, great. I think we're running out of time, but let's jump into just a little touch of Refractor. — I will do it pretty quick, yeah. So Refractor is a pending transaction storage and multisig aggregator for the Stellar network. It's a developer-focused service in the first place, but anyone can use it to store transactions and gather the signatures required to match signing thresholds. It's a service that's been around since 2021, and many active DAO organizations are using it — Aquarius, Reflector, YieldBlox at some point. So it's a very handy toolkit for DAO administration, joint custodial accounts, secure escrow services, treasury

[21:00] management services — any kind of service that utilizes Stellar multisig. Why might you need it? It's tricky to check what's inside a transaction — what the operations are, which signatures are required, and so on — when you just get some random XDR-encoded transaction and someone asks you to sign it. Gathering signatures requires sending transactions via messengers, email, or some other channel, which is not so convenient, let's say. If two or more people sign simultaneously, one of the signatures can be lost. If someone signs the wrong transaction, everyone needs to start fresh. With not enough signatures to meet the threshold,

[22:00] the transaction will be discarded by the network; with too many signatures it will also be discarded by the network. And it requires coordination.
So someone needs to oversee the process: collect the signatures, submit the signed transaction to the network, etc. We've been there, and that's actually a painful experience — once you start using multisig at scale, it's problematic. Refractor allows you to basically upload your transaction XDR via a web form: just copy-paste the transaction XDR, check the selected network, click save, and that's it. Optionally you can choose the auto-submit option, which allows the system to

[23:00] automatically submit the transaction to the network once it gets enough signatures. After creating it, you can simply share the link with all the signers, and that's it. That's how the transaction looks, and we can see here that it's much easier to understand what operations are inside, what the execution result will be, what the probable consequences are, and so on. The pending transaction page also displays all transaction details, the current signing status, and the automatically detected validity period. And the key part of it: signatures. Refractor can automatically detect

[24:00] all possible transaction signers, their weights, and the total transaction threshold. It will show who else can sign, and it basically prevents the situation where too many signatures were added and the transaction becomes invalid. Here, for example, you can see real-world examples of signatures with different thresholds — how it looks in the process of signing and how it looks when it's fully signed. The web interface supports multiple wallets, so there is an option to sign the transaction via the wallet of your choice, or to import a signed XDR directly, in case you have an air-gapped device or something like that. You

[25:00] can just copy-paste the XDR there, and Refractor will combine the valid signatures, discard duplicates, and remove all inapplicable signatures — it basically makes the whole process much more streamlined.
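For context, this is the round-trip Refractor automates. A minimal sketch with `@stellar/stellar-sdk` of what each signer otherwise does by hand: rebuild the pending transaction from a shared XDR, add a signature, and pass the grown XDR back to whoever coordinates submission.

```ts
import { Keypair, Networks, TransactionBuilder } from "@stellar/stellar-sdk";

const NETWORK = Networks.PUBLIC;

// Rebuild the pending transaction from the XDR someone shared with you,
// sign it, and return the XDR now carrying one more signature.
function signPendingXdr(xdrBase64: string, signerSecret: string): string {
  const tx = TransactionBuilder.fromXDR(xdrBase64, NETWORK);
  tx.sign(Keypair.fromSecret(signerSecret));
  // Shipping this back and forth between signers is exactly the
  // coordination overhead that Refractor replaces with a shared link.
  return tx.toXDR();
}
```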
It's a free service — free for businesses and individuals, without limitations. We have some rate limiting, but you can use it for free. It's been around since 2021, it is supported by our team, and it receives regular updates, so it's been a pretty reliable service. It is battle-tested — as I said, it has been employed by different DAOs — and it has an open API, so you can integrate it directly into your application or service,

[26:00] and best of all, it's open source: everyone can check what's inside, or run their own instance and work with it. So it's a pretty handy tool — anyone who will be managing a DAO or some kind of multisig account setup, you just need to check it out, and maybe it will be right for you. — I think it's very interesting, and multisig is not necessarily something that's easy to do, so having a tool like this is super interesting. Actually, maybe we should do a separate presentation or a separate talk about multisig, because you guys definitely have some experience with it — I think that could be an interesting topic for a future presentation. But for now we're

[27:00] unfortunately out of time and I need to wrap it up. Thank you both for joining — it was super interesting to learn more about what you're working on — and I can just encourage everyone that needs price feeds to start using it: it's super easy, it's free, and it's something that allows you to scale with your application as well, maybe into a subscription-based model.

So thank you for joining. — Thanks for having us. — Okay, talk to you later. Thanks everyone, bye.

</details>
+ +## Parallel Transaction Scheduling and Unified Asset Event Streams {#part-2} + + + +This protocol meeting covered two Core Advancement Proposals aimed at improving Stellar’s scalability and observability. The discussion explored parallel transaction execution for Soroban-heavy ledgers and a unified event model that aligns classic asset operations with Soroban contract events. + +The proposals focus on making transaction processing more predictable at scale and simplifying how downstream systems consume asset movement data. + +### Key Topics + +- CAP-0063: Parallelism-friendly Transaction Scheduling + - Introduces staged and clustered transaction sets + - Transactions within a cluster may have dependencies; clusters within a stage do not + - Enables parallel execution across CPU cores while preserving determinism +- Scheduling model: + - Uses transaction footprints to detect data dependencies + - Limits total sequential work via modeled instruction counts + - Network-configured limits define expected parallel capacity +- Benefits: + - More predictable ledger close times under load + - Better utilization of validator hardware + - No change to transaction ordering guarantees or fairness model +- TTL handling changes: + - Adjusts how TTL extensions on read-only entries are reconciled + - Ensures parallel execution does not introduce nondeterminism + - Downstream ingestion must avoid decreasing TTL values +- CAP-0067: Unified Asset Events + - Emits standardized asset events for classic Stellar operations + - Aligns classic asset behavior with Soroban/SEP-41 event semantics +- Event model changes: + - Transfer, mint, burn, clawback, and fee events emitted consistently + - Correct handling of issuer edge cases (mint vs transfer, burn vs transfer) + - New address types for claimable balances, liquidity pools, and muxed accounts +- Meta format updates: + - Introduces transaction meta V4 to generalize events per operation + - Events can be replayed from genesis for full historical consistency + - Intended to reduce downstream ambiguity and duplicated logic +- Open discussion areas: + - Memo handling and muxed account representation in events + - Tradeoffs between emitting events vs reconstructing state from meta + +### Resources + +- [CAP-0063: Parallelism-friendly Transaction Scheduling](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0063.md) +- [CAP-0063 Discussion](https://github.com/stellar/stellar-protocol/discussions/1602) +- [CAP-0067: Unified Asset Events](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) +- [CAP-0067 Discussion](https://github.com/stellar/stellar-protocol/discussions/1553) + +
+ Video Transcript

[00:00] Hello everyone, welcome to this week's protocol meeting. We have a couple of CAPs to discuss today, and I'll start by presenting [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) — I'll share the link here. So this CAP is about emitting events in Stellar in the same format that we see in Soroban for Stellar assets, so that a user can use the stream of events to track balances. This CAP covers three separate higher-level changes; they're listed in the abstract of the CAP, and I'll first go over the two smaller changes that make some semantic fixes to the Stellar Asset Contract events for compatibility with SEP-41, which is the token interface SEP. The first is that we're going to stop emitting the admin on the mint and clawback events. The admin is an implementation detail of the SAC and does not belong in the event — the admin doesn't even exist in SEP-41 as a concept.

[01:00] The second change is that we're going to fix an edge case with regard to transfers to and from an issuer. The SAC was written in a way that matched classic payment semantics, where a transfer from an issuer mints the asset and a transfer to an issuer burns the asset. The issue is that in these two scenarios we still emit a transfer event instead of the appropriate mint or burn event, so this CAP just proposes fixing that — emitting the correct mint or burn event instead of the transfer event. I'll pause there real quick, if there are any questions about those two changes, before I move on to the bigger change at hand. All right, then I'll move on to the third and most significant change of this CAP, which is emitting an event in the same format as the Stellar Asset Contract for any asset movement. This means that a Stellar operation that results in the movement of an asset can emit

[02:00] a transfer, mint, burn, or clawback event, and we're also adding a fee event to represent the fee paid by the source account. The CAP specifies what the events for each operation will look like, but I'll just mention some interesting points. A trade that occurs, either through an offer operation or a path payment operation, will result in two events — one for each side of the trade — and the two parties on both events will be the source account of the operation and the owner of the offer. These events can be transfer events, but they can instead be a mint or burn if the issuer is involved. We're also adding new SC address types to be able to represent claimable balances and liquidity pools as the from or to addresses in these events, which allows us to represent movements to and from these entries.

[03:00] Someone asked: can you explain removing the admin from the SAC mint and clawback events? So currently, on the mint and clawback events of the SAC — not in SEP-41 — the admin is one of the topics of the event, but this isn't necessary, and the concept of an admin is not required for a token. Exactly — we're not removing the admin from the SAC itself, we're just removing it from the topics of the event. All right. So, in addition to those two new address types for claimable balances and liquidity pools, we're also adding another SC address type for muxed accounts, so that these new events can pass along the muxed information when a muxed account is used in the operation or transaction.

[04:00] Let me also share my screen.
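A rough illustration of the issuer edge case just described — how a consumer could expect a classic payment to be classified under the proposed semantics. The types are illustrative, not the CAP's actual XDR.

```ts
type AssetEventKind = "transfer" | "mint" | "burn";

// Classify the event a classic payment would emit under the fixed semantics:
// a payment from the issuer creates supply, a payment to the issuer destroys it.
function classifyPayment(from: string, to: string, issuer: string): AssetEventKind {
  if (from === issuer) return "mint"; // issuer pays out: asset is minted
  if (to === issuer) return "burn";   // paying the issuer burns the asset
  return "transfer";                  // everything else is a plain transfer
}
```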
So I'm actually not presenting this in the order it shows up in the CAP — these are just some points I thought were interesting — but if you look at [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md), these points should be listed there with more detail. I also wanted to mention that these new events will not be hashed into the ledger, and therefore will not be part of the protocol, and that we will also emit these events from genesis if you replay from genesis. Something to keep in mind here is that there was a bug in a very old protocol version where XLM was minted and burned, and we will emit events that will allow you to reconcile those balance changes. And finally, we will be reworking the format of the transaction meta with transaction meta V4, which generalizes the events and separates them per operation.

[05:00] Previously the events were formatted in a way that was very specific to Soroban and to the fact that a single Soroban transaction has only one operation; transaction meta V4 generalizes that. The plan is to provide a config flag that enables the emission of this new meta format. Okay, let me look at some of these questions. "Will this resolve the issue with a locked issuer SAC having an admin that can do things the issuer can't?" — I think what you're asking is if you change the admin and then lock the issuer... okay, ignore that — no, the admin can still do whatever it wants.

[06:00] "Does backfilling mean that replaying with these new events enabled will emit meta V4 for all ledgers, rather than just post-protocol-23?" Yeah, that is the goal, and that should work — we probably have some stuff to work through there, but if we want to emit these events from the beginning, then it has to be meta V4. All right, so: muxed addresses and memos. One of the issues is that the CAP addresses muxed accounts partially, by forwarding the muxed addresses into the events, but there is another question about memos. I'll link this discussion

[07:00] topic from Nico — it's relevant. At a high level it amounts to considering a solution where you can group an arbitrary memo with a group of transfer events, which would let you solve some issues we've had in the past with the design of memos. But yeah, I don't know, Leigh, if you want to discuss this a little bit more — this concept is something we still have to discuss. — Yeah, and Nico may want to speak to this as well, since I feel

[08:00] like it's sort of his idea. We have another CAP, [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md), that we're not actually discussing today, that's doing some things with memos — touching where they show up in tooling, and making it possible to do a Soroban transaction with a memo and have that safely included in the signature. That's [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md). And something Nik was bringing up with this CAP is that right now we're in this moment where we're making significant changes to events — we're doing a lot of work around events — and it makes sense, we sort of have to get close to memos, because we've got this issue with muxed addresses. And
right now, [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md), in a very non-controversial way, is just

[09:00] suggesting that these events might now contain addresses that are muxed addresses. So we're just going to pass that through: if you do a payment operation that's got a destination that's a muxed address, the address field in the SAC event could be a muxed address — it could be an M-address instead of a G-address or a C-address. And I think that is very noncontroversial — it's the straight line of what we would expect the CAP to do in this situation. But there is this history: once upon a time there were memos, then there were M-addresses, and right now both get used in the ecosystem — both get used by exchanges. And when you look at the data, it tells a somewhat confusing story, because there's this memo field, there's a muxed address that can be in the destination, and technically sources can also be muxed

[10:00] addresses. And Nik was sort of suggesting — I don't know if this was actually a proposal — do we do something more here? Do we make these events, in this moment, more opinionated about what the memo for an event is? Do we do something like: if there's a transaction memo, this event carries that memo; or if there's a destination that's an M-address, do we pull the memo out of the address and say, well, actually that's the memo for this event? To some degree the ecosystem has to do this somewhere, and right now downstream ecosystem tooling has to make a decision about where these come from. And in the future — right now muxed addresses are only a classic thing, and memos only occur at the transaction level in [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md) — how do you attach

[11:00] memos to other groupings of transfers, if there are transfers being bundled together? Sorry, I'm not sure I'm doing a great job of summarizing this — there's a lot. — Yeah, it's definitely something that still needs to be thought through, but I guess the question is: should we explore addressing that issue in [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md)? It sounds like we should, at least at a high level. — I think we should, and I think what Nico just posted in the chat is a really succinct way of putting why we should consider it now: [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md) is forcing the memo to be exposed in downstream systems for Soroban events. And so, since this CAP is very focused on consistency between classic and Soroban — fixing it across those two dimensions, Soroban

[12:00] versus classic, SAC versus SEP-41 — it makes sense to discuss it now. — Yeah, makes sense. Okay, Orbit asked this question: why did we decide to extend the meta with the events,
if these events can be computed dynamically from the existing meta? It seems like redundant data in the meta. This is something we actually discussed internally — I believe George mentioned the same thing: a single source of truth, rather than risking bugs downstream from reinventing the wheel with dynamic computation. Leigh — I see you unmuted yourself — do you want to address this? — Yeah, I can speak to this. A large part of this is what we see when we look at the tooling we have today. We have Horizon, which takes a lot of what's in the meta today and creates this data model for people to consume. And then Stellar RPC was designed with a different

[13:00] approach, where it really exposes the raw network data and tries to introduce as little as possible in terms of its own data model on top of that. One of the reasons Stellar RPC was designed that way is that you can take contract events that have come out of Stellar RPC and they look the same way as if you take meta out of core, or if you use Galexie to generate a data lake of meta — the events look the same way in all these different places. Except if you go to Horizon: Horizon has its own data model, and it has that effects data model, which is very different. So the idea behind bringing these events to classic — to the Stellar Asset Contract and the classic operations — is to create this one

[14:00] unified view that appears everywhere, that looks the same everywhere the data occurs. — Okay. Hopefully that answers the question, but if not, let us know. I think, Leigh, you mentioned another question, about asset names in the topics of the events, because currently — go ahead. — Maybe before we move on, it sounds like Orbit has some things to say. Orbit, do you want to speak? — Yeah, hello everyone. I just wanted to

[15:00] add my two cents on this. We've been successfully reconstructing everything from the transaction meta XDR on the client side — basically our ingestion pipeline in StellarExpert is built on top of a library that we built specifically for this purpose. And we had a conversation with the Horizon team before — several years ago, I think — where I proposed to actually remove all these excessive data tables, like effects and everything like that, because all of it can be reconstructed on the client side. Of course, there are very specific issues, like the memo case Leigh covered before,

[16:00] but maybe we can just conduct some research on it. Maybe the parsing library we already have — which has been tested for years and which works with classic and Soroban — maybe it's enough, because on the client side people can just use it directly in JavaScript to parse the response they receive from RPC. Horizon doesn't return the transaction meta now, so that's probably a case where we cannot use it directly, but at least with RPC it has zero problems

[17:00] parsing. — Looks like George has something to say, but I can't bring him up on stage... oh, okay. — Yeah, I actually didn't have anything to say. I think Orbit makes a good point, but I do think that a unified event stream is better for your average dApp developer. Even though we can do all the indexing on the fly, that approach is probably more catered towards infrastructure providers like StellarExpert and Horizon.
So this is still going to have value for people who are just interacting with RPC directly. — I mean, we've been using it to actually display transactions in StellarExpert, in the wallet,

[18:00] in Refractor — everywhere. I'd say it's a universal approach that can be used everywhere. I'm not insisting on it; it just seems to me that the transaction meta will be even larger after this, and it might have some duplicated data because of it. Can you hear me well? — Yeah, I can hear you. — Okay, great. So I have a couple of thoughts on this. First, if the meta size increase is the concern: I don't know if we made it explicit anywhere, but I think this should be optional for the most part — at least I don't see a good reason for not making it optional.

[19:00] And the same actually goes for the ledger changes: ideally you would be able to pick one or the other depending on your downstream processing, whatever suits you. Then, regarding the library, I wanted to point out that, well, (a) JavaScript is not the only language out there, but also there is a very nuanced difference between ledger diffs and events. Events tell you what exactly has happened during the transaction, whereas ledger diffs tell you what the state was after the transaction. I think it matters more for Soroban, but it might have some interesting implications for classic too — I don't know if they're interesting to anyone. It's just that, given the format of the events, if you're processing Soroban you

[20:00] might be interested in processing it specifically as a stream of events, and not as a stream of data types that depend on the operations, like you'd get from the library — correct me if I'm missing something there. And the idea here is that if I wanted to track just the movement of a balance, and I'm not particularly interested in the exact semantics of each and every operation, I could just ingest the event stream and track the balance for a given account, which I think may be interesting to some consumers. So these are just some thoughts on why we're doing this at all. — I think the argument regarding

[21:00] the event stream is pretty solid, because this way the server — in this case RPC — handles the streaming itself, and the indexing, the cursors, etc. So instead of working with transactions and parsing transactions, it can really rely on these events. It's a solid argument. My point is that I'm not proposing to use JavaScript everywhere — maybe we can port this to Rust or other languages and use it inside RPC, inside other applications. Instead of adding the data to the meta, we could generate these events on the fly in RPC itself, because

[22:00] basically all the described events are what we already compute from the transaction meta XDR — that's something we already do. So maybe it's just one of the options, instead of extending the XDR itself again, which to me already looks pretty large — my subjective opinion on this. — But again, if you could opt out of emitting events, would that solve your issue with the size? Because besides the size concern, I feel like the suggestion of "let's put a common implementation somewhere" is just

[23:00] a way of saying: well, why don't we put this common implementation right into core? I don't think it makes a huge ideological difference, and I think it does make things easier to maintain and to ensure that it works correctly for everybody, especially since we are doing protocol changes.
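As an aside, the consumption pattern Dima described a moment ago — tracking a balance purely from the unified event stream, without caring about per-operation semantics — could look roughly like this sketch (the event shape is illustrative, not the CAP's actual XDR):

```ts
// Illustrative event shape: transfer debits `from` and credits `to`,
// mint has no `from`, burn/clawback/fee have no `to`.
interface AssetEvent {
  kind: "transfer" | "mint" | "burn" | "clawback" | "fee";
  from?: string;
  to?: string;
  amount: bigint;
}

// Fold one event into a running balance map; replaying the whole stream
// from genesis reconstructs every balance without per-operation logic.
function applyEvent(balances: Map<string, bigint>, ev: AssetEvent): void {
  const get = (addr: string) => balances.get(addr) ?? 0n;
  if (ev.from) balances.set(ev.from, get(ev.from) - ev.amount); // debit
  if (ev.to) balances.set(ev.to, get(ev.to) + ev.amount);       // credit
}
```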
— Yeah, and just for the record, I totally agree. So I don't know if you have any strong arguments against, but I feel like we're more or less on the same page here in terms of standardization, and putting it into core kind of makes sense. — Yeah, and the standardization sits inside the narrative of everything being a contract. I think that's what's beautiful about this:

[24:00] today we have classic operations, and then we have everything that happens on Soroban, and the results of those things look pretty different. But as a result of this work, we're moving towards a future where everything that happens on classic looks exactly like a contract executing. All assets actually do have a contract reserved for them — the Stellar Asset Contract — and so the events are just going to look the same for everything that happens with assets, no matter whether it's classic or anything like that; it just looks like a contract. That's also much more similar to other blockchain ecosystems, so there's that familiarity for everybody about how this works versus, say, Ethereum and other chains. — Yeah, and I'll make sure to clarify in the CAP that nothing's being removed from the meta and that this will be optional — that's a good point. I mentioned the config flag for

[25:00] transaction meta V4, but I'll go into that in a little more detail. Are there any other questions? — I didn't quite catch whether we have made any final decision on memos — what we talked about like ten minutes ago. What is the final status? Maybe I've lost it. — Yeah, so in the context of this CAP we are forwarding the muxed account information right into the addresses, but for memos we still need to explore what Nico mentioned earlier. So there's nothing finalized there, but we are going to look into it. — Right, so no final decision. Okay, I had just missed it, okay, yeah.

[26:00] — I'm not sure we want to discuss that more right now, because nobody's actually written down a formal proposal for how an alternative would work. I could say something right now that I think aligns with what Nik was talking about, but I don't know if it'd be valuable — I think we should actually get something written down and then present it in a future meeting, because I know we have 30 minutes left and Dima has a couple of CAPs to present. That's probably more valuable. — Right, great, all right, go ahead. — Okay, yeah. I don't actually mind spending more time on the events if necessary, but if you're done, we can move on to the next CAP —

[27:00] that's CAP-63. So this is basically a CAP that mostly changes the transaction set, but it does so in a pretty interesting way. As you may or may not know, all the hassle you need to go through with the footprints in your transaction is there for a good reason, and this reason is being able to run the transactions in parallel. Because if we know your footprint, we know whether there is a data dependency between two transactions, and we actually can run them in parallel without worrying about any synchronization. So, given the footprints, in theory, today you could write your own version of the core application logic

[28:00] that takes the transactions and partitions them somehow into threads that run in parallel with each other.
But the issue with that is that there is no good bound on how much time it is supposed to take. For example, if you take 10 transactions without data dependencies, you could run them in 10 threads, and they would take, say, 10 milliseconds after synchronization. Or you could have the same 10 transactions, but all of them depending on some ledger entries being updated, and thus you cannot schedule them into 10 different threads — you would run them in a single thread, and in the end you would spend not 10 milliseconds but 100 milliseconds to apply all the transactions. So basically, since the protocol currently doesn't do anything about parallelization, there is no good way to

[29:00] schedule the transactions in such a way that applying them takes a bounded, expected time. Well, there is still an upper bound on the runtime, but this upper bound varies wildly, and it's not good if a ledger may close either within 100 milliseconds or within, say, two seconds. Which is why we are doing this CAP-63, which solves exactly that problem: given a set of transactions, come up with a transaction set data structure that guarantees a certain time for applying the transactions. A caveat, of course, is that this time is time in modeled instructions, not wall-clock time, because we cannot tell that beforehand without running the contracts — but we hope that our cost models are

[30:00] good enough. And another caveat is that you actually need a sufficient number of physical cores to support the multithreading, because, again, if you have just a single core and you wanted to apply transactions in 10 threads, obviously you will not get any performance gain — the transactions are purely CPU-bound. So that's the motivation for this CAP. The way it works is that we define a new structure for the transaction set that, instead of having a linear array of

[31:00] transactions, defines two levels of grouping transactions together. The first level is called stages — I will talk about why stages are needed in a moment — and then every stage consists of multiple clusters of transactions. Within a cluster, transactions generally may have data dependencies: it is expected for a cluster to contain data-dependent transactions that need to run sequentially. However, there are no data dependencies between the clusters themselves, and thus, since every cluster is guaranteed to be independent of every other cluster, you can take every cluster and put it into a separate physical thread, and apply the whole stage in parallel with as many

[32:00] threads as you have clusters. Also, I probably forgot to clarify what we consider a data dependency: it is when a single transaction has an entry in its read-write footprint, and another transaction has the same entry in either its read-only or read-write footprint. That means one transaction modifies the entry and another transaction either reads or modifies it; in either case we need to sequence them, because if we don't, then we will get nondeterministic results for these transactions.
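A sketch of the dependency rule as just defined, with ledger keys as opaque strings. The conflict predicate is the substance here; how core actually builds clusters and stages from it may differ.

```ts
interface Tx {
  readOnly: Set<string>;  // ledger keys read by the transaction
  readWrite: Set<string>; // ledger keys the transaction may modify
}

const overlaps = (x: Set<string>, y: Set<string>): boolean =>
  [...x].some((k) => y.has(k));

// Two transactions conflict when one's read-write footprint intersects the
// other's read-only or read-write footprint. Conflicting transactions must
// share a cluster (sequential); conflict-free clusters run on separate
// threads within a stage.
function conflicts(a: Tx, b: Tx): boolean {
  return (
    overlaps(a.readWrite, b.readWrite) ||
    overlaps(a.readWrite, b.readOnly) ||
    overlaps(b.readWrite, a.readOnly)
  );
}
```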
Now, why do we have this additional level of transaction grouping into stages? The reason is that if we just try to naively say, hey, let's put the transactions into separate threads in the transaction set and leave it at that, then you may run into some

[33:00] issues with certain traffic patterns. For example, imagine an oracle contract: it updates an entry under some key, and then there are a bunch of transactions that want to read the value from that oracle contract and that key. Thus we have a data dependency on a single entry — but we have only a single write of the entry and a lot of reads of the exact same entry. So what we can do with the stages is put the write into one stage and then all the reads into a different stage. This basically introduces just a few barriers of execution into the transaction execution schedule, but it

[34:00] allows us to efficiently work around these traffic patterns. And given a smart enough — maybe not even that smart — algorithm for actually coming up with this data structure, you can deal with a surprising number of conflicts all at once, without introducing any special scheduling or synchronization procedure: you just have a few barriers in between the transaction applications, and this allows resolving a lot of the conflicts at the same time. Before diving deeper, let me pause for a moment — are there any questions so far? Okay, a question on whether this changes how transactions are selected. So the CAP itself just changes

[35:00] the structure of the transaction sets; it doesn't have any particular changes around which transactions we pick at all. But what I can say is that the protocol specification remains such that the transaction application order is randomized, and I think in the case of MEV-style manipulations nothing changes from the current approach, because for any arbitrage you're probably going to have a data dependency, data-dependent transactions are applied sequentially, and we shuffle all the stuff that is applied sequentially. So the transaction order is still hard to predict, and I don't think it's

[36:00] any easier to manipulate than it is currently — which is to say it's not impossible, but it amounts to just spamming the ledger with arbitrage transactions, for example. In terms of the system requirements: again, the CAP doesn't define them — I will talk about this in a moment — but of course, yes, if we want to run transactions in parallel, then you need to have a certain number of cores present on the validator, which is, I guess, a tradeoff — nodes need to use better hardware, since we cannot find more

[37:00] throughput in transaction processing otherwise. I think I already have a section coming up on shuffling the transactions, so let me move on. Okay, I see a question regarding the limits — let me actually go a bit deeper into how exactly things are specified. The first thing is that the CAP does introduce a new network configuration setting for the maximum number of clusters per stage, which roughly — well, not roughly, but basically — maps to the expected number of cores

[38:00] that the network is willing to throw at applying ledgers. If you have fewer cores, then it may take you longer than expected to process transactions; if you have more cores, or at least that many, you should be good. And like all the network configuration settings, this setting will be modified by validator vote. So if some validators say that they don't have that many cores, they will not vote for this, and ultimately the network can decide whether the requirements for validators are too expensive or too hard to meet. And also, to make it clear: when we upgrade to protocol 23, this setting will be set to just one.
So no parallelism is allowed by the initial network

[39:00] setting — a network vote has to happen to enable any parallelism. Okay, then on to the structure. I already talked through the specification of the data structure. The fees and surge pricing will work the same way they work now: there will be a single base fee for the transactions — 100 stroops if there is no surge pricing — and if there is surge pricing, which means there were transactions in the mempool that you couldn't include in the ledger, we take the fee of the cheapest transaction that we do include and use it as the base fee that everyone has to pay.

[40:00] Okay, and now again to the apply order. First, there is still a canonical apply order that is defined in the CAP, so you can still apply all the transactions sequentially and arrive at the same result as you would if you applied them in parallel. And similar to what we do now, we shuffle things before applying them: first we shuffle every cluster, using the hash of the transaction set and the hash of every transaction; then the clusters are ordered by the transaction

[41:00] hashes, and then we shuffle the stages as well. So if transactions are in different stages, one may happen before or after the other — that's roughly what we do for shuffling, and I think it's not significantly different from the status quo. Yeah, I don't think it does anything specific for MEV — again, it doesn't change the status quo at a large scale. I think the best thing you can do is still just spam the network with arbitrage transactions; that is the issue we have now and it

[42:00] persists. The goal of this CAP is not to prevent these types of attacks, but it does not make it any easier to guarantee any particular execution order between two transactions. So if you see something specific, maybe we can follow up on it later, but I don't think there is any change to the execution order that is relevant here. Okay, so that's the simpler part of the CAP: again, multiple threads to apply transactions in parallel. Oh — the most important thing, which I forgot to mention, is that

[43:00] we limit the number of sequential instructions across all the stages. That means that for every stage we look at the cluster that takes the longest time in terms of modeled instructions, and then we sum up these numbers across all the stages, and we come up with a number of sequential modeled instructions, which hopefully approximates the real runtime for applying the transaction set, given enough physical threads. For example, if the ledger limit currently is 500 million instructions, and with this CAP you set ledgerMaxDependentTxClusters to, say, eight — meaning you need eight physical threads — and we keep the limit at 500 million instructions, then we expect the ledger application time to be roughly bounded by those 500 million modeled instructions, which

[44:00] currently maps to a few hundred milliseconds with our cost models.
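The bound just described is simple arithmetic — per stage, take the most expensive cluster, then sum across stages. A tiny sketch:

```ts
// Modeled instruction counts per transaction, grouped into clusters, then stages.
type Cluster = number[];
type Stage = Cluster[];

const clusterCost = (c: Cluster): number => c.reduce((sum, tx) => sum + tx, 0);

// Sequential work = sum over stages of each stage's most expensive cluster:
// a stage only finishes when its slowest cluster does.
const sequentialWork = (stages: Stage[]): number =>
  stages.reduce((total, stage) => total + Math.max(...stage.map(clusterCost)), 0);

// Example: stage 1 is bounded by 300 (200+100), stage 2 by 150 => 450 total.
console.log(sequentialWork([[[200, 100], [250]], [[150], [80, 40]]])); // 450
```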
If we treated them as rights there is a big chance we wouldn't be able to AR anything at all. Because things would be just Clump together with all this clusters of + +[45:00] for us contract, that happen to update totl on the same entes, which is why we decided, that we actually can reconcile the r changes after running all the transactions without risking introducing any nondeterminism and this TTL updates someand exchange is doing exactly, that changes how we update ttls in such a way, that changes to TTL done by transactions, that only touch the readon entries are not observable until someone actually writes the entry or until we have finished applying all the transactions, which is good enough to be able to actually run all the transactions, that have the same ke only the run them in parallel and still right have the proper TTL + +[46:00] value and the algorithm proposed is pretty leny. But the gist of it is, that. If two transactions update to TL on the same entry they both will see the initial state of the TTL of the tantry and will be charged the respective fee for set. So for example. If transaction a has a key in with only footprint and updates TL of some say XLM contract rate by 200 ledgers increases TTL by 200 ledgers. And then transaction B increases TTL of exm contract by 100 ledgers well the transaction a will pay for extension by 200 ledgers and transaction B will pay for the D extension by 100 Ledges. But + +[47:00] what we will do in background is we will correct this TTL changes and only apply the maximum change out of, that to the ler out of this two to the ler, which means, that the exm contract will be in the end extended by 200 lers and it does introduce bit of annoyance into how mattera should be processed, which is unfortunate. But I couldn't think of a way, that is both compatible with prism and doesn't require any changes in met ingestion and this change is, that basically. When ingesting the changes in TLS from the mattera you should never decrease TT this is only thing you need to do + +[48:00] it is documented in the CAP and you'll probably make another announcement. But yeah we kind of get around doing something special for the map and this seems like the minimum possible change. So TTL like, that you record in the back end and never go down. But the TTL, That You observe in the is correct DET change for the mattera is correct in the context of a given transaction meaning as I mentioned before, that will charge the fee based on the changes, that you observe in The Meta and this is kind of the ju of it and U right yeah I can talk a little bit about candid generation. And then move on to question CAPs don't typically provide + +[49:00] a way of how to actually build a transaction sets, which makes sense it doesn't need to be a part of the protocol as long as it is valid it doesn't matter how it has been built. But will'll likely implement the simple gitty algorithm described in the CAP and the idea behind it is just to pack the transactions into the stages R. While uring we are utilizing the stes properly by being packing from time to time and the efficiency of being packing heris sixs is surprisingly good. So in the end I think we can I added some benchmarks and we can both P the leeders pretty efficiently like uze most of the instructions and we can also deal with a decent amount of + +[50:00] conflicts like. 
If you have some relatively sparse clusters of conflicts, that will not cause any depredation in lure utilization at all and in order to have any real degradation un need like some super inter interdependent transaction for, which I don't expect to be happening in the wild. So I think this should be good enough at least initially and. If we ever observe there are issues with, that ways to deal with, that both at the mle level and the U wayer of building the transaction sets I think, that's all I wanted to talk about just to present the CAP, and now I will most through the chat and read some questions + +[51:00] yeah question from niik about the meta changes for TTL M net diffs for ttls not emit one per transaction well we still need to emit TT per transaction I think. Because they are an effect of the transaction this is how we charge P and I'm not sure it is correct to just omit them and whether we want to update meta I have + +[52:00] considered this I'm not sure it is necessarily a better solution. So basically the alternative to what is proposed in this app is well we could add yet another field to the mattera, that will explicitly output the TTL every entry facted by the wer the final for it after applying every transaction in The Ledger I like, that's potentially a significant amount of duplication unless we also kind of remove the changes for transaction I'm not sure. If it is a good idea to remove the changes per transaction. Because then well meta just has some entries drop, that transaction has actually affected, which makes tricky to debu transactions for example makes to understand your fees. If you ever wanted to run the computation logic + +[53:00] comp. So I'm sure we can omit, that Pro transaction changes. But then I'm not sure. If duplicating this data is necessarily making things much easier to just not decrease in TTL with your track on it so, that's my opinion and I mean you either need special Logic for merges or you need special Logic for this new types of TT entries and you also need to kind of sometimes ignore, that for transaction changes I'm honestly not convinced. If it is a good idea and. If you wanted it possible to only ingest the T changes after the LED. Then so, that you don't even need to process per transaction changes at all + +[54:00] then basically we duplicate every TTL change into, which is again probably not very Fe you don't know n. If you want to talk more about this or we can continue offline okay I don't see any more questions and we actually have just three minutes left so. If anyone has any questions please feel free to comment on the ad discussions read it is yeah I guess the to meta change is really the most controversial thing about it everything else + +[55:00] is really uncontroversial I feel. Because though we are just allowing to include more transactions in Ledger and we still shuffle them. So no changes for any. So yeah and one final thing I'll say thanks for presenting Dima We we forgot to talk about back to the unified events a one topic about how the sa events have the asset name of the topics. But SE 41 doesn't specify having putting the asset name in the topic. So lee is about to start a thread about this. So we can discuss it there yeah Le just posted the thread in the chat yeah, that's all we have for today thank you for joining + +
diff --git a/meetings/2025-02-13.mdx b/meetings/2025-02-13.mdx new file mode 100644 index 0000000000..8c2d3cb513 --- /dev/null +++ b/meetings/2025-02-13.mdx @@ -0,0 +1,194 @@ +--- +title: "Liquidity Aggregation Smart Contract Analytics and New Soroban Host Capabilities" +description: "Deep dive into cross-contract liquidity aggregation and swap routing, and new host functions introduced in recent CAPs." +authors: + - carsten-jacobsen + - dmytro-kozhevin + - orbitlens +tags: + - developer + - CAP-66 + - CAP-67 + - CAP-68 + - CAP-69 +--- + +import YouTube from "@site/src/components/YouTube"; + +## OrbitLens' Cross-Protocol Liquidity Routing and On-Chain Analytics {#part-1} + + + +This developer meeting focused on ecosystem tooling that improves trade execution and visibility across Stellar’s mixed “classic + Soroban” liquidity landscape. OrbitLens presented StellarBroker, a swap router that aggregates liquidity across multiple venues, and then walked through how StellarExpert surfaces Soroban contract execution details, storage, and protocol configuration changes. + +The conversation emphasized reducing fragmentation for end users and giving developers practical diagnostics for contract calls, storage behavior, and protocol/runtimes as the network evolves. + +### Key Topics + +- StellarBroker liquidity routing + - Aggregates liquidity across Soroban AMMs, classic AMMs, and classic orderbooks + - Routes and splits trades to improve execution price and reduce price impact on large swaps + - Designed to reduce “retry loops” by tracking submitted transactions and re-quoting on failures + - Connects directly to Stellar Core for fast updates (often ahead of Horizon timing) + - Current venue coverage: + - Live: Aquarius pools, Soroswap pools + - In pipeline: `FxDAO Hub` and `Comet` pools + - Partner integrations + - SDK-based integration with revenue sharing for applications/wallets embedding swaps + - Already connected to Lobstr wallet; additional Ultra Stellar apps in progress +- StellarExpert Soroban analytics + - Transaction breakdown with contract invocation trees and function-level details + - Surfaces parameters, return values, runtime/SDK versions, and contract-emitted events + - Displays balance movements as debits/credits for easier interpretation of token flows + - Storage visibility: + - Instance vs persistent vs temporary entries + - TTL and last-updated metadata for stored values + - Contract-level dashboards: + - Invocation stats by function and time window + - Interface discovery (public functions/types) and WASM download + - Protocol/config history view: + - Tracks protocol upgrades and Soroban runtime/config changes (limits and settings) + +### Resources + +- [StellarBroker](https://stellar.broker) +- [StellarExpert](https://stellar.expert) + +
+ Video Transcript

[00:00] Hello everyone, and welcome to this week's Stellar developer meeting. If you were joining us last week, you would have seen OrbitLens — I invited him back again because we had so much to cover last time that we didn't get through everything we wanted. OrbitLens has actually built quite a few projects on Stellar, and one of them I'm sure you're very familiar with is StellarExpert. We're going to cover that a little bit today, but first we will hear more about StellarBroker. So yeah,

[01:00] please just introduce yourself again — I know I introduced you last week, but for new people on here, please go ahead. — Yeah, sure. Hello everyone, everybody knows me as OrbitLens. I'm the lead of the StellarExpert team; we're building different infrastructure services on the Stellar network, and basically today I'd like to talk about two of them. StellarBroker — yeah, so StellarBroker is a multi-source liquidity swap tool and swap router for Stellar. Currently Stellar has several issues

[02:00] with fragmented liquidity. Basically, it leads to worse prices for end users and complexity in using various protocols, and as a result it's quite difficult to find the best price across all of them, because some assets are located mainly on the classic Stellar DEX and others on the newer protocols' AMMs. And when someone executes a large trade — when someone trades at scale — such large-volume trades are usually vulnerable to front-running, and front-running leads to profit loss on multipart trades, because splitting a trade into several

[03:00] smaller trades creates a situation where somebody can front-run your swap, try to be faster, and extract the profit. And of course there is simply the question of liquidity, because I do remember a situation where people couldn't simply swap on chain and execute a settlement for about 100,000 USDC in one trade, with one path payment, which is also not the best user experience. So we've been working on this project, called StellarBroker, together with a team from Ultra Stellar.
Because users still get better rates and developers can receive like micro bonuses from these transactions from their user base the integration will be pretty is straightforward we already published + +[07:00] the SDK for it and everything you will need you'll just need to insert AP from the website and from this point will work will just work and as a hood it's much more complex than it looks. Because it works with varas liquidity pools on the San. So currently we have Aquarius and S SW pools connected at our production website and we also have two more pools in the pipeline, which expected to come next week it will be forx Hub + +[08:00] and it will be comment pool. So our execution engine like finds the best opportunities and first of all analyzes how the trade can be split into many multiple trades. Because executing several different trades instead of one large transaction means, that there are chances to get front run or to just end up with a failed transaction. Because the market is always a dynamic scene it's moving a lot and it's virtually impossible to guess, which liquidity will be there like in the next Ledger. So our engine fls all the complexity here. Because it + +[09:00] provides tracking of submitted transactions it automatically connects to Stellar Core fetching all the required information from there and we basically can basically have the entire graph of all available DEX orders there in memory and of course graph for soran tools this is everything is located in the same place and provides you a very convenient interface on top of web soet API, which is pretty fast we made some measurements and like average quart response time is less than 300 milliseconds, which is very fast and usually like our + +[10:00] nod receives updates and send it to user like more than two seconds faster than Horizon. Because we connect directly to Stellar Core to receive all the trade information as fast as possible and we can execute like multiple trade simulation per seconds it's tens of thousands it scales well with paralyzation of CPU threats. So it means, that getting more simulations results in better price quotes for end users so, that's basically pretty much all of it I just wanted to say, that is it is already connected + +[11:00] to albida wallet and we're working to connected to Stellar and other Ultra Stellar applications. If you are interested to try it to play around just let us know and let's talk any wallet or like application, that works with user trades U can benefit from this optimized transaction execution flow. So just let us know probably. If if we have any questions in the chat I'll be happy to answer yeah let's see. If there's any questions in the + +[12:00] chat there was one about public facing docks I think I've shared a link to GitHub is there anything else people can go look at yeah it's basically the J Hub link and currently it's the npm package, which shows the example of connection to the system so, that's basically everything you need to integrate great I think it's super interesting also and not just. Because of the ease of use. But also the speed and the fact, that you can bring the swap in on your own application and decide what fee you're going to charge for doing the swap. So super interesting. If we don't have any other questions + +[13:00] we can also see. If we have some time at the end of it. 
But we can move to StellarExpert and, that should be a tool, that is known by most Stellar developers it was probably the first tool I started using trying to verify, that my transactions were going through and just playing around with different Quest and onboarding games. So so yeah it's I think it's familiar to most. So be interesting to hear from the creator of it yeah happy to hear, that and, that's the tool, that been around for years and I'd like to thank you for all the users, that keep up with us and despite sometimes U arise in small bugs problems with it we're trying to keep it + +[14:00] as functional as it could be and today I'd like to talk a bit about smart contract stats and analyzes of smart contract transactions. Because we've been receiving some questions by email and other communication channels on how it works and I think it might be interes in not only for newcomers. But also for season developers still. So first of all how can you get to the transaction and how can you find something you just need to copy paste like account address contract address or proit name into the search box on the website and will lead you directly to the account or + +[15:00] contract history where you can see this list of transactions here you can see operation type and like execution date to check a particular operation you just need to click this small arrow and it will present you the expanded view I know it's a bit scary. But no need to panic it's only from the first glance we'll go through it and it will be quite there is nothing too complex about it first of all it shows the C tree of the top level contract invocations. So we have this level contract, that can invoke some other contracts and + +[16:00] provide additional functionality through this. When you click this info icon you can see the expanded view of the contract function itself it automatically shows you parameters of the function return value like version of the runtime and SDK and. If developers left Commander. Then you will also see this R commands in this section debit and credit records show the amount of token, which have been + +[17:00] transferred like spent received in the trans during the transaction execution. So we can see here for example, that this amount of tokens has been debited from this account and credited to another account contracts can save the data on The Ledger with different storage retention options. So like in data is basically the data, that is inside the contract itself and every time like the environment loads the instance of the contract it also loads all the data attached to it resistent entries stored separately and temporary items can be used to store some cheap temporary + +[18:00] information, that can be easily evicted additionally some contracts may emit events, which is U this mechanism is intended for publishing data for indexers the event consist of the header of the event and some arbitrary contract data. So these events can be further analyzed by indexers by rpcs or some other applications and at the bottom of the invocation you can see what happens on the system level. So like input output memory usage Ledger entries written and U read and + +[19:00] many other very low level statistics and additional of course here you can see also fees charged for these particular transactions, which can be analyzed further now. If we scale up a bit we have a contract view. 
If you just click the address of any contract you will end up here at the contract View and it will display like the general summary of the contract and current contract balances this is I think is a liquidity pool contract, that holds XLM and you DC + +[20:00] assets we have here the information who created the contract and there was some hash of the contract also it was pretty active contract, that made a lot of transfers data storage here shows the number of entries, that have been written and stored on chain below it you can see the detailed breakdown of all the ocation stats here you can play around with functions selecting some spe specific function you want to analyze choose the period for starts and analyze some specific statistics. So for example. If you want to know why + +[21:00] these grew up after the recent deployment you can see it here you can see how this value change over time and how to analyze them history tab we already talked about iter and the interface tab contains the list of public experts from the contract it's like functions all the structures enams everything, that is declared in the contract and publicly available all the contract interfaces here and it's quite handy tool. If you want to build a transaction using CLI or any other Tool or you just maybe want to play around Vis it and check + +[22:00] what's inside the contract here is also the link for downloading the contract was and here is the next interface, which shows the list of stor data entries for some particular contract here we can see the, that the entry is persistent or not it's TTL value. So time to leave. When the last it was updated and balances attached to these values and one more thing, that I'd like to cover probably not everybody knows about this functionality. But we have protocol versions history in the interface it's under the main menu + +[23:00] section and in the protocols history you can see changes, that were applied to the to The Ledger during prodal upgrades and of also during suran runtime F changes updates it's the lowlevel view of what happens. When validators vote for increas in limits and basically here you can easily check and say what are our current limits and what was the last update and what changed during I have a lot of more to cover. But let's wrap at it. Because I think, that we have already few minutes left so. If you have any question questions let + +[24:00] let St yeah thank you for the run through I definitely have learned a little bit I'm still a pretty basic user. But but I think it's a very it's very practical to be able to see something like the protocol upgrades I think there's a lot of good features for debuging and for evaluating your contracts do we have any questions from the comments I don't see, that I don't think. So but yeah let's see there's one here something about combining plastic and soron is this in the context of StellarExpert yes. So we already show + +[25:00] the transaction history and trades and everything for classic operations. But trades history and some specific for example liquidity pool stats or for example blend pool stats they are still unavailable. Because like right. Now we still have the process of standardization in progress. So right. 
Now the committee is talking about moving to some more obvious standards for publishing for example events what will be the format of these events and how they can be further analyzed by Downstream systems to produce all the DAT it should be available soon in + +[26:00] Star expert all the price history all the stats from surban Contrition okay great I think, that's all we have time for today thank you for joining again thank you for inviting this was really interesting I love to see some of the community build tools it's a it was a pleasure to chat to you again and yeah. If anyone else has some questions I'm sure they can reach out to you on Discord. But yeah definitely for joining and thank you all this thank you okay thank you bye + +
+ +## New Soroban Host Functions for Address Introspection and Data Conversion {#part-2} + + + +This protocol meeting covered two small but developer-facing CAPs that add new Soroban host functions. The goal is to make common on-chain patterns easier: (1) introspecting what an `Address` actually represents, and (2) converting between `String` and `Bytes` without awkward workarounds. + +### Key Topics + +- Updates to CAP-0066 (Soroban In-memory Read Resource) +- CAP-0068 address executable introspection + - Adds a host function to retrieve an “executable descriptor” for an `Address` + - Distinguishes: + - Classic accounts (no executable) + - Built-in Stellar Asset Contract (SAC) addresses + - WASM contracts (returns the contract code hash) + - Use cases discussed: + - On-chain detection of SAC vs custom token contracts (no hardcoding) + - Safer authorization patterns for modular accounts/policies by pinning trusted WASM hashes + - Better interoperability for cross-chain/token tooling that needs reliable contract identification + - Implementation discussion: + - Preference for a simpler/flat data structure for the returned descriptor (clarity over complexity) +- CAP-0069 `String` ⇄ `Bytes` conversions + - Adds host functions to convert `String` to `Bytes` and `Bytes` back to `String` + - Motivation: + - `String` has limited manipulation utilities today + - Many operations (e.g., indexing, slicing) are easier on `Bytes` without copying large buffers into guest memory + - Improves ergonomics for contracts that need to process text-like data without bespoke memory work + +### Resources + +- [CAP-0066](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) +- [CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) +- [CAP-0068](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0068.md) — [Discussion](https://github.com/stellar/stellar-protocol/discussions/1626) +- [CAP-0069](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0069.md) — [Discussion](https://github.com/stellar/stellar-protocol/discussions/1633) + +
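+Neither host function has shipped yet, so any code is necessarily speculative. The sketch below illustrates both use cases with placeholder names: `AddressExecutable`, `get_executable`, and `string_to_bytes` are stand-ins (stubbed so the snippet is self-contained), and the final SDK surface defined by the CAPs may well differ:
+
+```rust
+use soroban_sdk::{Address, Bytes, BytesN, Env, String};
+
+// Hypothetical shape of the CAP-0068 descriptor. The meeting leaned toward
+// a flat enum, which this mirrors; real names and types may differ.
+pub enum AddressExecutable {
+    ClassicAccount,        // G-address: no contract executable at all
+    StellarAssetContract,  // built-in SAC
+    Wasm(BytesN<32>),      // custom contract: its WASM code hash
+}
+
+// Stub standing in for the proposed CAP-0068 host function.
+fn get_executable(_env: &Env, _addr: &Address) -> AddressExecutable {
+    unimplemented!()
+}
+
+// CAP-0068 use case: pin trust to a specific WASM hash, so an upgraded or
+// re-deployed contract at the same address is no longer trusted implicitly.
+fn is_trusted_policy(env: &Env, addr: &Address, trusted: &BytesN<32>) -> bool {
+    matches!(get_executable(env, addr), AddressExecutable::Wasm(h) if &h == trusted)
+}
+
+// Stub standing in for the proposed CAP-0069 String -> Bytes conversion.
+fn string_to_bytes(_env: &Env, _s: &String) -> Bytes {
+    unimplemented!()
+}
+
+// CAP-0069 use case: route a String through Bytes to use the richer Bytes
+// host functions (indexing, slicing) without copying into guest memory.
+fn first_byte(env: &Env, s: &String) -> Option<u8> {
+    string_to_bytes(env, s).get(0)
+}
+```
+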
+<details>
+  <summary>Video Transcript</summary>
+
+[00:00] I see some folks have come to the meeting already, so we can probably get started. First, please confirm in the chat that you can hear me. Okay, great, thank you. So today we have two rather small CAPs to go through. Both CAPs are about adding some new host functions for Soroban, and both are pretty simple, so
+
+[01:00] hopefully it shouldn't take too long to discuss them. Okay, maybe let's wait for thirty seconds more, I see folks keep coming. Right, let's get started. We have CAP-68 first. CAP-68 proposes to add a new host function that will be used to get an
+
+[02:00] executable for an address, and the executable value will be different depending on what the address is. More specifically, when the address corresponds to a classic account, there is technically no contract and no executable for it, which is why we'll return a special value saying this is a classic account. When the address corresponds to a contract, there are two different cases: one case is when the contract is a Stellar Asset Contract, which is our built-in contract, and in that case you will get a flag that says this is a Stellar Asset Contract; and when you're dealing
+
+[03:00] with a regular WASM contract, we'll tell you it's a WASM contract and provide the respective hash. That is actually both the specification of the CAP and a quick description of it; there isn't much more to say there. But there is quite a bit to tell about the motivation: why would someone actually want this host function, and why wasn't it implemented initially in the protocol? In general, addresses are kind of abstract, right? You can, for example, call require_auth for any address and in the background we will figure out what exactly to call for it, or you can save an address in storage, and you don't care what exactly it is. But there are some more
+
+[04:00] narrow use cases which came up during the time of Soroban's existence, and they're different in their own ways, but the common theme is that some information about the address needs to be retrieved. Specifically, one of the cases that has been brought up is distinguishing custom tokens from Stellar Asset Contract tokens, and the way this function helps is that it is actually able to tell, on chain, whether a given contract is a Stellar Asset Contract. Then you can, for example, look up the contract name and see which classic asset it corresponds to. If you know the executable is a Stellar Asset Contract and you know
+
+[05:00] that its name is, for example, "native", then you are 100% sure, on chain, that the contract you're dealing with is actually the XLM contract: it has the name "native" and its executable is a Stellar Asset Contract. The same goes for basically any asset. This use case came up in the context of the Axelar bridge, which does cross-chain token transfers: to deploy a token on a different chain they need to understand whether they're dealing with a classic token, so they can create the appropriate metadata for the event, and there is no need to hardcode, for example, contract IDs for all the contracts. I think the question of what the address of the XLM contract is, or how to figure out whether something is XLM, has come up quite a bit before, and now we provide a way that doesn't involve hardcoding the
+
+[06:00] XLM value, which may be valuable in some cases. Another use case that has come up is when dealing with custom accounts. There are several actually quite different cases for why you would want to know what the WASM hash of a particular contract is, and this may come up both in the implementation of the custom account itself (say, you want your custom account to only authorize token operations on some trusted token implementation, for example the Stellar Asset Contract or a trusted custom token implementation), or maybe, in general, you want to be able to verify the source of certain contracts; you
+
+[07:00] want to audit it, so it's more of a customization scenario. Another case that has been brought up specifically is that if you wanted to build some modularized account that has several contracts implementing several different authorization policies, which kind of makes sense for general custom account customization, you may have some contracts that would do auth for you, and you actually want to be confident that these contracts will not be updated. For example, you trust certain contracts to do something on your behalf and you trust their current implementation, but you do not want this implementation to ever change; and if you know
+
+[08:00] what the current code hash is, you can actually enforce that and make sure the hash hasn't changed in a future invocation. So all these are pretty narrow cases; it probably doesn't come up for each and every contract, but I think they are interesting enough, and in general, since the functionality exists, it makes sense to provide it. It's like the counterpart of the deployer function: you can deploy something, but you cannot know what exactly has been deployed; now we kind of close the other end of this. And beyond what I've been talking about, there are other things people may come up with for this function; it's not something you need to store, for example. I can try to speak louder, is it better
+
+[09:00] now? Okay. Yeah, you should have pinged me sooner. So what I've been saying is that the use cases presented in the discussions are what motivated this CAP in the first place, but I'm sure there are more things that people may come up with. And this functionality is not unique to Soroban: the EVM, for example, even has functions to retrieve the entire code, which I don't think is relevant to Soroban, but also to retrieve the hash of the contract code, which is more or less what we are doing here. So I think overall it's a pretty sensible idea
+
+[10:00] to have, and seemingly there are some use cases where this is helpful. So this is pretty much it for the CAP presentation; I don't know if there are any questions. There was a discussion on GitHub regarding the data structure to be used here, because the initial CAP had a nested data structure proposed, and Leigh suggested using a flat data structure. I haven't replied in the discussion, but after looking at this again before this meeting, it seems like a sensible suggestion, so I'll probably update the CAP and make the enum structure flat. It's a small implementation detail, but I
+
+[11:00] think it probably makes sense to do that. So that's pretty much it on [CAP-68](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0068.md). Are there any questions?
+
+[12:00] Yeah, regarding the flattening: I agree that even though we're going to have two very similar data types, I think it makes sense conceptually to have this flat, and I guess it's not too much of a maintenance burden. Not for performance reasons, but mostly for clarity. All right. Anyway, if anyone has any more questions, please post them in the discussion thread. Otherwise, that's pretty much it for [CAP-68](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0068.md), and now [CAP-69](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0069.md). It's an even simpler CAP, and it simply adds two conversion functions: one to
+
+[13:00] convert a string object into a bytes object, and another to convert a bytes object back into a string object. The reason for this is, first, that strings and bytes in Soroban are essentially the same thing, so it makes sense for them to be convertible to each other. But the main reason why this has actually come up is that, due to a bit of an oversight in the design, String has a very restricted set of host functions that can do anything with it: specifically, today you can only get the string length and copy it into memory. So people were trying to get,
+
+[14:00] for example, a character of a string object, and that's not possible without actually copying the whole string back into guest memory; and if you don't know the length of your string, then you need to link an allocator or do some weird hack where you would only load some slice, or something. So basically it's all kind of hacky, and just adding the conversion functions is a very quick fix for that: you can convert a string into bytes on the host side, perform any of the operations that are already available for bytes, and then maybe convert it back, or not, depending on your logic. So yeah, I think it was mostly a design oversight; I don't think there's a great reason not to have it.
+
+[15:00] Also, this has come up at least once before, about a year ago, and again recently: people are just confused about why there is no conversion between bytes and strings, and, well, we can just add it; it's a very simple change. So I guess that's all I have to say for this CAP. If there are any questions, please let me know, or post them in the respective GitHub discussion. Right, so that's it for me. I don't know
+
+[16:00] if someone wants to use this time for [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) or not. But yeah, that's pretty much it for me, and if no one else has anything to talk about, then we can call it a meeting.
+
+[17:00] .. (text chat only)
+
+[18:00] .. (see [Discord thread](https://discordapp.com/channels/897514728459468821/911254664576643122/1339714705878679594))
+
+[19:00] Okay, it seems like there's no more discussion for today, and I guess that's it for this meeting. Thank you everyone for your attention.
+
+</details>
diff --git a/meetings/2025-02-20.mdx b/meetings/2025-02-20.mdx
new file mode 100644
index 0000000000..8cab4fcef0
--- /dev/null
+++ b/meetings/2025-02-20.mdx
@@ -0,0 +1,248 @@
+---
+title: "KaleFail Demo App, Smart Wallet UX, and Memo Support for Unified Token Events"
+description: "A walkthrough of the KaleFail demo app and its passkey smart wallet UX, plus a protocol discussion on memo support for CAP-67 unified token events."
+authors:
+  - carsten-jacobsen
+  - dmytro-kozhevin
+  - elliot-voris
+  - george-kudrayvtsev
+  - leigh-mcculloch
+  - nicolas-barry
+  - siddharth-suresh
+tags: [developer, CAP-67, SEP-41]
+---
+
+import YouTube from "@site/src/components/YouTube";
+
+## KaleFail Demo App and Passkey Smart Wallet UX {#part-1}
+
+Elliot Voris introduces KaleFail, a playful Soroban demo app built on top of Kalepail’s KALE project. It’s designed as a hands-on learning tool that showcases smart contracts, passkey smart wallets, and a simple DeFi-like trading experience—wrapped in a fun “trade kale for vegetables” mechanic.
+
+The walkthrough covers how the trading post contract works (including admin-managed extensibility for adding new “vegetables”), how users sign transactions with passkeys, and how Launchtube abstracts fees and other chain complexities so wallets can operate without holding XLM.
+
+### Key Topics
+
+- KaleFail’s purpose as a Soroban demo app built on the KALE ecosystem
+- Trading post mechanics: swap KALE 1:1 for related “vegetable” tokens (broccoli, cabbage, kohlrabi, etc.)
+- Contract flow for trades (see the sketch after the resources below):
+  - validate the requested asset is supported
+  - transfer KALE into the contract and mint the output token, or burn the input token and transfer KALE out
+- Owner/admin interface for extending inventory (the demo adds a new asset and registers it with the trading post)
+- Passkey smart wallet UX for signing actions without traditional wallet friction
+- Launchtube paymaster flow:
+  - the app submits a signed transaction to Launchtube
+  - Launchtube handles fees, sequence numbers, and submission
+  - enables “ready-to-use” wallets without requiring users to acquire XLM first
+- Planned features: login with an existing KALE wallet, a “kitchen” flow to burn vegetables into a salad-style NFT, and potential community-driven NFT ideas
+
+### Resources
+
+- [KaleFail](https://kalefail.elliotfriend.com)
+- [Repository](https://github.com/elliotfriend/project-kalefail)
+- [KALE](https://kalepail.com/kale)
+
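+The trade flow above can be sketched roughly as follows. This is a simplified illustration rather than the actual KaleFail contract (see the repository link above for the real one): the storage keys and inventory shape are assumptions, balance checks and events are elided, and minting relies on the trading post having been made admin of each vegetable's asset contract, as in the demo:
+
+```rust
+use soroban_sdk::{contract, contractimpl, symbol_short, token, Address, Env, Vec};
+
+#[contract]
+pub struct TradingPost;
+
+#[contractimpl]
+impl TradingPost {
+    pub fn trade(env: Env, customer: Address, vegetable: Address, amount: i128, buy_kale: bool) {
+        customer.require_auth();
+
+        // Validate the requested asset against the supported inventory.
+        let kale: Address = env.storage().instance().get(&symbol_short!("kale")).unwrap();
+        let veggies: Vec<Address> = env.storage().instance().get(&symbol_short!("veggies")).unwrap();
+        if !veggies.contains(&vegetable) {
+            panic!("vegetable not in stock");
+        }
+
+        let kale_client = token::Client::new(&env, &kale);
+        let this = env.current_contract_address();
+
+        if buy_kale {
+            // Burn the vegetable and release KALE from the reserves, 1:1.
+            token::Client::new(&env, &vegetable).burn(&customer, &amount);
+            kale_client.transfer(&this, &customer, &amount);
+        } else {
+            // Take KALE into the reserves and mint the vegetable, 1:1.
+            // Minting works because the trading post is the asset's admin.
+            kale_client.transfer(&customer, &this, &amount);
+            token::StellarAssetClient::new(&env, &vegetable).mint(&customer, &amount);
+        }
+    }
+}
+```
+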
+<details>
+  <summary>Video Transcript</summary>
+
+[00:00] Hello and welcome to this week's Stellar developer meeting. Today I have a guest, and the guest is actually from my own team: it's Elliot, and Elliot is going to talk about a project he's been working on. It's a fun project. In our team, the DevRel team, we've talked about how it should be fun to learn to build on Stellar, and one of the ways we can do that is to play around ourselves and build some cool stuff we can play with, and hopefully you find it fun too. And of course there's a lot of learning involved in that. So I'm very excited to introduce you to Elliot and his pet project, KaleFail. Maybe you can just give a little introduction to how this all got started; I think it has something to do with kale. Yeah, so like Carsten
+
+[01:00] mentioned, my name is Elliot. I'm a developer advocate here at SDF. I get to be a jack of all trades in a lot of ways, and I've loved, for a long time, just messing around with Stellar, having fun, and just breaking stuff. I was reminiscing with Tyler earlier today about all the ways we used to jerry-rig chains of operations into creating some smart-contract-esque kind of thing. But Soroban is one year old today, and we have actual, official, real-life smart contracts on Stellar, and it's an amazing time to be developing in this community. I think a lot of the time we get caught up in the seriousness of what we're doing here at Stellar and the real-life
+
+[02:00] implications of what we're building, and sometimes there's just room for something on the lighter side, you know? And so Tyler started that, in that vein, with KALE; I think it was back in November when he launched this. So I'll share my screen, and yes, perfect. KALE, if you aren't aware, is sort of a meme-coin kind of asset built on Stellar that Tyler, aka kalepail, introduced. Originally Enrique created this SCM (or FCM) contract that initiated this competitive, mining-style asset concept, and Tyler took that and went in a more teamwork-oriented
+
+[03:00] direction with it. So if you aren't already, I highly suggest you start mining some kale. It's a lot of fun, and I'll demonstrate: it's real easy, real simple. If you're not already logged in you can create a new account. I'm actually going to cancel that because I already have one, so I'm going to log in, and I'm presented with this prompt that I can plant something. I'm going to sign that with my passkey (this is a smart wallet I've been given) to plant and get the opportunity to mine some kale. And everybody is kind of in this together: it's community-driven mining, so to speak, sort of like a pool if you've ever tried actually mining Ether or Bitcoin or whatever.
+
+[04:00] All of the people, for about five minutes or so, pool their efforts to create this hash that has a certain number of zeros. There's a lot of math; if you click on Tyler's project repository here, it'll take you to his GitHub, where there's all kinds of stuff about how the math works. As a group of people it all gets mashed together when you work, and then eventually you harvest: after a block has been successfully planted and revealed (I don't know if that's the right word), everybody can harvest, and you get kale minted to you. You can automate it, and you don't have to use this website to do
+
+[05:00] it, but it's a lot of fun and it's honestly the easiest way to do it. We've got a leaderboard here, and you can see that Bri, right here with her 3,000 or so, is in the top 10. Congratulations! A lot of community has been built up around this, and it's a really good time to check it out and get involved. The thing everybody's been asking, though, is: I've got this kale, I've got in this wallet one and a half or so kale that I've mined, what do I do with it? And up to now the answer has been: not that much. There's not really a market for it, and you're not really selling it, and that's kind of by design. So what I created, I'm
+
+[06:00] affectionately calling KaleFail, and Tyler really loves that name, he's super excited about it. It's just sort of a riff, a spin if you will, on this KALE asset that Tyler put together. Right now it is essentially a trading contract, and this contract makes it possible for you to send kale. Just like his farming site, you can sign up here with a smart wallet (it'll give you a smart wallet), and you can put some kale into that smart wallet, and then you can trade it. Right now we've got broccoli, we've got cabbage, we've got kohlrabi. If you don't know the farming lingo, those are all different cultivars of the same species of plant as kale; they're really all the same thing, which is why it's always tradable one to one. So I'm going to send like a hundred kale, let's get some
+
+[07:00] broccoli back, make the trade, sign with the smart wallet, and then it thinks for a second longer, and it thinks for one more second, and 105 kale, I mean broccoli, is now in my wallet. Let's do the same thing with cabbage. Essentially the smart contract is just holding on to the kale in its reserves (you can see this update as the transactions go through), holding it in reserve and minting new vegetables to me. Let's do kohlrabi as well: I'm going to make a trade, sign in, and I'm going to get as many vegetables as I dang well please, because I need the fiber or something; I don't really know what vegetables are good for. But now I'm sitting here with 100 each of broccoli, cabbage, and kohlrabi,
+
+[08:00] and the way I designed this contract is that it's kind of extensible, if that's the right word, to allow the owner of the trading post to update their stock, add new vegetables, take them away, and expand and grow as the need arises. So what I'm going to do here is copy-paste this link into the chat as well, and then I'm going to try my best to share a different window. What if I do that and present, and we can do this one; does that work, does everybody see that? Cool. Nope, that's the wrong one, I don't want
+
+[09:00] that. Okay. So the contract works, and it's pretty simple. It's split into owner and customer interfaces, and really there's one function for the customer: trade. You have an address for your customer, what vegetable you want to trade, how much of it, and whether or not you're buying kale, meaning whether you're getting kale out of the exchange, or broccoli or whatever the case may be. It just makes sure that it's an actual vegetable we have in stock, it sets up a couple of token clients depending on whether you're buying kale or selling kale, and then: if it's kale you're buying, that means kale is going to be transferred from the trading post contract into your own wallet, and it's going to burn the broccoli or cabbage or whatever you're trading; otherwise it's going to
+
+[10:00] receive the kale into the trading post contract and then mint the vegetable into your smart wallet. But I also have this owner interface that allows me to add vegetables when I see fit. So if you'll all bear with me, I'm going to try this without having done it before; we're going to try this live. I'm going to add brussels sprouts: Raph has been asking for brussels sprouts since the day I launched this, so I figure it's probably time to give him what he wants. So we're going to run `stellar contract asset deploy`; the source is going to be kalefail, that's what I call it,
+
+[11:00] the network is mainnet, and the asset is going to be this thing that I'm copy-pasting right now. What you may not know is that "Brussels sprouts" is far more than 12 characters, which is the maximum length of an asset name, so I had a real hard time landing on a proper abbreviation for what brussels sprouts should be. Okay, so the asset is deployed. Next we run `stellar contract invoke`; the source is kalefail (no... yes, kalefail), the network is mainnet, the ID is going to be whatever this asset contract
+
+[12:00] was, and we're going to invoke the `set_admin` function; the new admin is going to be the trading post contract address. The transaction is signing and submitting, and then we get "transaction successful". Where is it... "true", cool, I always look for that. Now we get to add it to our trading post: `stellar contract invoke`, source kalefail, network mainnet, the ID is the trading post contract, and we're going to invoke the `add_vegetables` function. The argument is "new"... nope, it's called `vegetables_to_add`,
+
+[13:00] because I am real creative. And we're going to make an array, or vector rather, of addresses, in case we wanted to do more than one. And then we invoke: simulation's okay, submission looks like it's okay. Let's go back (I know I'm giving you guys whiplash with all of these window switches), okay, now we go back to KaleFail, and if we're lucky... yes! The vegetables available now include brussels
+
+[14:00] sprouts. So I'm going to snatch up some of that. The abbreviation, about the closest thing I could land on, was my wife's idea; she actually enjoys brussels sprouts, which is a sin, basically. But now I am the first one with my very own brussels sprouts; be sure to grab some on your own. I've got a couple of things up next for KaleFail. Number one: it's a pain in the butt to sign up for the kale farm, mine some kale, and then also have to sign up for KaleFail and transfer the tokens, doing all that before you can actually do anything. So I'm going to set up an add-signer kind of functionality where you can log in to KaleFail with your existing KALE wallet. I'm excited for that; you should be able to do that really soon. The next thing after
+
+[15:00] that is this kitchen that Tyler and Jerome have been itching to get a hold of. Soon we'll have a salad-like NFT: basically a pretty simple "let's trade in your tokens, burn the vegetables, and give you a really pretty picture of a salad", or something else; I'm not sure what other produce-related things might be in store. Soon after that, I've got in mind some fancier NFTs. If any of you have been around for four or so years and remember the smart NFTs that Tyler put together, maybe some interesting concepts similar to that, but actually on Soroban. And even maybe community-submitted recipes or other kinds of NFT goodness: fun things that can be voted on, maybe, and approved, and all that kind of stuff. I'm super open to any
+
+[16:00] ideas you have, or anything people generally want to see. What do you want to do with your kale? Maybe we can make it happen. But yeah, that is KaleFail in a nutshell. Everybody go have some fun, enjoy the AI art that I stole from Tyler's site to begin with, and read all of the fantastic lore that Bri has put together; it is phenomenal, all five whole chapters, it's a freaking novel, go enjoy it. Great, thank you for the presentation, and actually, this is a fun game. I got a little bit addicted to farming kale, and it's really fun. But when you think about it, if we take KALE and KaleFail, it's actually a DeFi application, because it has the elements of a DeFi
+
+[17:00] application: it's just gamified a little bit, and instead of using real token names, real asset names, we're using kale and broccoli and brussels sprouts. But this is actually a DeFi application. And as you were trading kale for broccoli, I hope a lot of people noticed that you were just using passkeys: there was no complicated wallet access or anything like that, just a passkey. So this is a good example if developers want to play around and see what you can build on Stellar and with Soroban, and how it's built, because underneath all the fun stuff it is actually a DeFi application, and there are a lot
+
+[18:00] of Soroban features, like passkeys, included in this. So there's a lot of learning that goes into understanding how this works, a lot of inspiration, and it's a good sample project if you want to learn more. So yeah, any other tech that I left out that people can learn from by looking at the source code? Yeah, the source code is definitely available, linked on the KaleFail site, easy to get to. It's a SvelteKit application, if you're into front-end libraries and frameworks; that's just kind of how my brain works to build. But to your point, everything is passkey-powered. This smart wallet that I have, "CB...WN" or whatever, doesn't have to hold a base reserve of lumens; there are no lumens that it holds. Oh, it's
+
+[19:00] $163, sweet. It doesn't have to hold any XLM to operate on the network: everything is done through Launchtube. It's this paymaster service that abstracts the gas fees, sequence numbers, transaction envelopes; all the difficult things about building on Stellar are just taken care of. So no fees come out of my smart wallet, and the same is true when you're mining kale: it's all done through Launchtube. It's just this incredible opportunity to use these smart wallets: to have a wallet that you can just hand to somebody without having to tell them, "okay, now buy lumens from Coinbase and do this to get your wallet set up." It's just ready for users to actually use. So yeah, you just mentioned Launchtube, and maybe, if everyone is not familiar with what Launchtube is,
+
+[20:00] can you go into a little bit of detail about how you're using Launchtube and what role it plays in this? Yeah. So Launchtube is a service that we're kind of pioneering to make it easier for applications and developers to build applications that use Stellar. When you are trying to build a transaction, you need a sequence number, you need an account to provide that sequence number, you need a balance to pay the fees, and you need a base reserve held in the wallet, one lumen minimum, to make sure your account is active, like a minimum balance requirement at some banks. All of that can be a pain for a developer if you're trying to get users set up with a wallet so they can actually interact with the blockchain: you've got to either sponsor or give them one lumen to satisfy that
+
+[21:00] base reserve, and you have to pay whatever fees for those transactions to actually hit the network. With Launchtube, though, you get a signed transaction from the user (using my passkey wallet in these examples), and then you send that transaction to Launchtube, and Launchtube will take care of the sequence number and the fee. All the developer has to worry about is getting a token with valid Launchtube credits on it, and that's super easy, barely an inconvenience: if you go to testnet.launchtube.xyz/gen, it'll give you one for free. If you want a real-life mainnet one, jump into the Launchtube channel in our Discord, or reach out to myself, or Tyler, or Carsten, or Chris; any of the
+
+[22:00] developer advocates would be more than happy to provide you with Launchtube credentials so you can start building in this same super easy, super streamlined way. I feel like I didn't fully answer that, sorry. No, that's great. So yeah, I can only encourage everyone to go play: farm some kale, do some trading, get your broccoli and other vegetables, and then go look at the source code, because this is a really cool application; it really shows a lot of different features of Stellar and Soroban. So go have fun with it, and go look at the source code and learn how Elliot built this. I think this is all we have time for today, unless we have a couple of questions. I don't think we have one, but there is still an opportunity to ask a question,
+
+[23:00] anybody? Sweet, doesn't look like it. But thank you so much for joining, Elliot. It was fun to see KaleFail, and I hope everyone is having fun with it. Thank you, have a great day, bye.
+
+</details>
+ +## Memo Support Updates for Unified Token Events (CAP-67) {#part-2} + + + +This protocol discussion focuses on adding memo-related capability to CAP-67 unified token events to better support batch-payment style workflows and exchange/custodial requirements. The group debates how memo data should be represented and propagated through token transfer interfaces and emitted events, balancing compatibility, developer ergonomics, and downstream indexing needs. + +### Key Topics + +- Proposal direction: extend the token standard with a `transfer_with_memo`-style function to support optional memos for `from`/`to` +- Event design debate for memo propagation: + - whether memo data belongs in event topics (filterable) vs event data (less index pressure) + - concern: putting memos into topics increases topic size/indexing complexity and may be too granular +- Compatibility tradeoffs: + - moving memo info into event data may require a breaking change if current event data is a simple integer rather than a structured map + - alternative: emit a second “transfer-with-memo” event alongside the existing transfer event +- Multiplexed address approach (m-address / muxed accounts) discussed as an alternative to explicit memo parameters: + - aim: avoid doubling down on “memo as a separate identifier” patterns + - concern: contract developer experience and token interface complexity vs client-side unpacking complexity +- Practical downstream considerations raised: + - how exchanges and indexers would filter/consume events (often anchored on base account, then reconcile memo-like metadata) + - avoiding double-processing if multiple events represent the same underlying transfer +- SEP-41 implications noted: + - memo support likely requires aligning token event formats and clarifying how extended event data should be represented (e.g., structured map carrying `amount` plus optional fields) + +### Resources + +- [CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) +- [Discussion](https://github.com/stellar/stellar-protocol/discussions/1553) +- [SEP-0041](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0041.md) + +
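+Since the event format was still under debate, the following is illustrative only. It sketches the "emit a second transfer-with-memo event" alternative: the function name matches the proposal, but the memo type (`u64` here), the event topics, and the data layout are all assumptions, and balance bookkeeping is elided:
+
+```rust
+use soroban_sdk::{contract, contractimpl, symbol_short, Address, Env};
+
+#[contract]
+pub struct MemoToken;
+
+#[contractimpl]
+impl MemoToken {
+    pub fn transfer_with_memo(
+        env: Env,
+        from: Address,
+        to: Address,
+        amount: i128,
+        from_memo: Option<u64>,
+        to_memo: Option<u64>,
+    ) {
+        from.require_auth();
+
+        // ... balance bookkeeping elided ...
+
+        // Emit the standard SEP-41 transfer event unchanged, so existing
+        // consumers keep working without modification.
+        env.events()
+            .publish((symbol_short!("transfer"), &from, &to), amount);
+
+        // The debated part: a second event carrying the memos. Placing them
+        // in the data payload (rather than topics) avoids extra indexing
+        // pressure; consumers that care about memos subscribe to this one.
+        env.events().publish(
+            (symbol_short!("transfer"), symbol_short!("memo"), &from, &to),
+            (amount, from_memo, to_memo),
+        );
+    }
+}
+```
+
+The double-processing concern noted above applies directly to this variant: because two events describe a single transfer, downstream consumers need a convention for counting it only once.
+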
+ Video Transcript + +[00:00] All right I'll get started well welcome everyone today I'll be speaking about an update to [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md), which is the unified events CAP. So so a recent change was made where with the goal of reaching feature parody with regards to Memos in seller classic and more specifically you know batch payments within a transaction with different mxed operation Source accounts are possible in classic and this is a way you can make batch payments to an exchange. So we wanted to make similar functionality possible in sorun. So the proposed solution is to add an extension function called transfer with memo to the Token standard. If if you want to support memos. So the function takes an optional memos for both the from and two addresses and will emit transfer event with a new topic, that is a vector of memos this format does need to be discussed, though there's + +[01:00] there's been a lot of discussion on this recently so. If we don't expect, that the memos to be filtered on. Then it should actually be in the data field. But putting this into the data field will be a breaking change, which is why I didn't initially update the proposal for, that. Because the data field is currently an integer instead of a map. But one of the options is making, that breaking change and updating, that and the other option is to split the. If you want do want the top memos to be filtered on is to split, that Vector two separate memo fields in the topic. So we leave the memos in the topics and allow them to be filtered on there and instead of. So those are updates to the transfer event the other alternative is instead of emitting the transfer event we ALS we emit a we still we emit the transfer event as it exists today. And then we emit an additional transfer with memo + +[02:00] event with the memo information so. If you care about the memo you consume, that event as well and there's also an alternative proposal in the thread I just linked from Dima to First Class the MOX count type, which is a different proposal from the transfer with memo a methodology, that the CAP defines. So yeah there I know there's a lot of opinions on what we should do here I don't know. If anyone wants to chime in maybe Lee or dimma yeah I guess I could quickly present my option and recap some points I've made in the discussion Regarding why do I think, that my proposal might be a better option than an explicit operation code transfer with memo and the gist of The Proposal is, that + +[03:00] we make it. So from the client standpoint the mark addresses do not look like do not require any special treatment meaning, that you can create an st Val, that represents for example Max classic account and pass it as out to the contract and changes are only necessary on the contract side similar to the transfer memo option right the changes are necessary on the contract side to support this. Because if you're are talking about the approach or we literally have a transfer with memo function we are kind of reinforcing the memo concept, that is really problematic. Because like we have two different identifiers + +[04:00] identifiers, that the users need to care about and we put it deeper into the protocol into the token standards. And then we just expect down Downstream systems to handle it in a special way to and. If represented this singular address I am not sure. If this is going to apply like. 
If if this has worked for example I don't see why wouldn't like changes to the trade they constantly get issues with memers they could in theory build an M address and it back into memo and g account, that doesn't happen. Because that's very cumbersome and, that Reise on a lot of parties to do the right thing at the same time whereas in my proposal the only thing, that a v would need to do is well convert a Spring key into an address, which is like + +[05:00] call library function. So there is no need to somehow specially handle this and we are kind of reinforcing a good pattern of using Max accounts and the main benefit of Max accounts is, that they have a canonical single address representation and in general the user doesn't need to worry about what exactly it is only s it's they need to worry about is, that this is their address of their exchange account quote unquote and they need to deposit money to this account and this is the only thing, that they we need to worry about. So it's kind of the gist of the proposals there are definitely information details, that could be discussed. But I guess there was lot of position on the whole idea of doing this, which honestly I'm still not + +[06:00] sure I understand the reasons behind. Because again like the transfer M proposal is like doubling down on the memo hack, that has been existent in the protocol for a. While so yeah, that's kind of my side process here with your propos proposal to you have a transfer function, that would take two different types right it would either it could either take the address object is exists today or among yeah address object Bally I'd say the main requirement is, that on the client side Al XDR should deal with SC addresses, that represent maxed accounts can introduce a concept of Max contracts as well, that's the main requirement. Then + +[07:00] everything else is going into the realm of the implementation details and things may be adjusted for quity. If I think, that's needed like. But yeah in my original proposals area was, that since the contract types are they by the nature of our San design they're polymorphic right or they variant types basically right. So you can pass a general Val, that represents one of the about of types to the contract, which means, that the layer of the contract interfaces there shouldn't be an issue for a contract to be able to accept not only add dress object in a function. But either an address object or maxed address object. So is one way this can + +[08:00] be implemented U on like the level of the Cross contract interraction there is of course definitely some stuff, that needs to be figured out how exactly do this DEC how do we represent this interface at all. But I don't think it's impossible to achieve especially given, that it's kind of a narrow use case I think it could be a separate interface, that you implement. If you want to support MOS and this interface will be compatible with the interface of the Regional token contract. Because again protocol itself does not enforce any type restrictions Ty restrictions happen at the contract implementation layer. So I think yeah can do, that and the same goes for the event trade like we can do different things + +[09:00] about the events like we can a transfer event we can like ex option and kind of in favor of, which is another benefit of this approach is, that since we have Max accounts in the protocol what we could do is we could have a an additional field in the event structure itself for through this mares I don't know. 
If it is too wasteful or not. But at least this is an option, that we have, that does not require imited additional EV and also does not mess up index and for those who don't care about M. So you could have a set Vector of the multiplex Tes addresses in the event, which would generate automatically. So yeah, that's kind of one of the options, that this opens up as well. So the vent point you mentioned like, that's a, that's be done with either approach right where we + +[10:00] separate the max ID well it depends on versus goes like in the transfer memo option it has to go through the normal EV data structure. So it will either belong to data or topics with a protocol based approach we have an option of putting it into a sort field, that is not data and not a topic Oh you mean the XDR yeah in the XDR yes so. If you don't care about this you will not see it at all and this event will quite a normal transfer event you interesting okay so, that would require some Downstream like some work in RPC to expose, that additional information right. Because I don't think RPC currently exposes of course everything course I mean sure any option requires like some sort of additional work somewhere yeah and + +[11:00] yeah it's not an argument Pro con it's not a pro con of any approach yeah sorry I Wasing wanted to say something yeah. So I think like we're discussing a few different small features, that all come together into sort of these two different ways, that we could implement this. And so I think the four different features we're talking about is you know should the should it be an M address or should it be a g address plus a memo in the input. And then the same thing for the output the event. And then we're talking about should we reuse the transfer function the existing transfer function or should we separate the this use of M memers into a separate + +[12:00] function. And then should we reuse the transfer event or should we separate them into the transfer with memo event like each of these things are like separate decisions, that we can make I think some of what you've alluded to De about you know the SDK and you know what would this look like in the S Cas. So I understand I think. When we look at from the host interface yes all values, that come into a contract are a Val. So it's a little bit like. If you're a Java programmer and you're familiar with like object you know you can make your parameter coming into a function as object or you can make it some type and map to different types. So you know we could add nstk type, that is I'm not sure what we' call it any address I'm just going to use the term any address right. Now and address could be this MX + +[13:00] address or it could be the address we have today. So the MX address is an M address and the address we have today is a g or a c. And so we could do, that. But we're sort of adding a lot more, that I think every token developer is going to be exposed to and going to have to learn about and, that's like, that's a barrier to entry, that's a potential point of confusion. So why I think, that every token developer will be exposed to this is. Because we can't change we can't have, that token interface be something, that's extendable. So like right. Now the SDK has. If the function name is the same. So right. Now there's this token interface in the S an SDK and there's tooling built around, that by folks like. If you go and have a look at like the OpenZeppelin contracts and stuff. 
So there's like tooling being both buil bu up around + +[14:00] being compatible with it and we can't have like another trait, that provides a different type of transfer function, that has the same name and has a different type as the input so, that means, that somebody, that goes and implements you know one of those traits their code might not be compatible with someon the other type of the trait. If we have two complete different traits there sort of like it's very difficult it's not really simple in Rust to you know rust doesn't have this concept of like overloading functions. So it's just yeah it's a little bit difficult to figure out how do we actually make, that work. So I think like the develop like a narrative around like what a developer experiences. So like on the surface I think like yeah this you know trying to build everything into one function overloading the type these are all nice engineering these all seem like nice engineering decisions. But the narrative, that actually results in for the + +[15:00] user of the SDK is reasonably complicated like they need to learn about more things whereas most custom token developers probably don't need to know about MX addresses they probably don't need to know about memos like a lot of what we're adding for like Marx address and memos is for yeah I guess some specific UK US cases where the token Developers for those specific U cases would need to be aware of them yeah I'm yeah I've made a lot of like I've answered a lot of these questions in the discussion. But yeah I can answer again and I'd really like to challenge a lot of your points. Because well first thing is the developer complexity I think, that transfer memo option introduces developer complexity + +[16:00] arguably more developer complexity just in different place and this place is St like wets or block explorers and I would argue, that complexity in these places is much worse. Because instead of you know doing this once or a few times in places like OpenZeppelin implementations for example right you actually ask every developer to either resort to just present in this transfer with memo operation literally transfer with memo and you know we kind of just repeat the mistake of building them into protocol and we'll have funds lost. Because people just forget to paste memo in the operation or you have a requirement for the downstream developers to have the same level of knowledge of what M must do. But the work around again. If we like in case of the SDK. If there is at least some + +[17:00] possible way of building a samean solution for the developers, which we can discuss this rain firms options for the downstream it's basically up to them for the interpretation and I don't see a sane way of standardizing this and making sure, that you know every time address is an address but. If you encounter transfer res memo somewhere. Then this is not an address this is Max address and then. If you encounter an M address and user tries to make a transfer you actually need to resort to this different function and you need to break it down I think it's more complicated than messing a bit with r traes and the solutions, that are proc involve like what. If token interface accepted just a valve for examp example, which is + +[18:00] that's type safe. But you know token implementation just makes a check. If it is address in one case Max address in the other case. 
Then we don't even need separate traits; it just moves the check out of the SDK's automagical type conversion into the contract itself. That's just one option we can think about more. My point is that I feel this is fundamentally a better place to put complexity, because it's higher upstream, and ultimately not many people need to deal with it, especially if it's done once in something like the OpenZeppelin implementations. But also, sorry, one last point I wanted to make: I don't agree that most token developers shouldn't care about memos. I think they do, because they probably hope their + +[19:00] tokens will be listed on exchanges at some point, sooner or later. So in either option they will need to worry about this one way or another, either by implementing transfer-with-memo or muxed account support. So again, I'm not sure we are actually saving anything here. So one thing I found interesting: you talked about this idea that we want to encourage M-address use, and we don't want to go down the path where the two were separated, because that was a mistake, it was easy to forget them. I think we're conflating some of the layers here. When M-addresses were added, there were actually two problems to solve. The first problem was people forgetting the memo, and + +[20:00] displaying that combined information as just a single identifier they could copy and paste into the wallet. The second problem that had to be solved was how we get that information into the existing transaction XDR, and the way we settled on solving that second piece is largely an implementation decision. That first piece is the actual problem, the user problem we were trying to solve; the second piece is just implementation detail, and the way we solved it at the time, largely led by the existing structure of the XDR, was to put that memo ID next to the account inside this muxed account structure, because we could do that in a way that didn't break a lot of the existing XDR. But the goal was really just to get the memo alongside each of the places where we needed to attach it to a destination or a source. So that + +[21:00] last part is largely an implementation decision; the problem that was originally solved with muxed accounts is that first part, the user copying and pasting a single ID into their wallet. Both of the solutions we're discussing here satisfy that first problem, and if we're going to continue to support muxed accounts, that's the thing we have to make sure continues to get solved. Otherwise, a lot of those details, like whether the wallet splits apart the M-address, are implementation details; wallets already need to know about M-addresses, so that's not a big deal, and we could potentially make the SDK easier at doing that for them. So I guess I'm not really seeing the connection with continuing to propagate and pass
those along, + +[22:00] because, to be really clear, in the XDR an M-address is still two parts: the memo is still separated from the public key. Sure, totally. But the thing is, again, with transfer-with-memo we have two separate contract call arguments, and yes, of course it is an implementation detail, but this is a detail that will cause a lot of bad decisions downstream. Wait, can we talk specifically about what those decisions are? Because something I just want to highlight is that downstream systems will want to filter these events by the G-address, not by the... No, I'm not talking about the events, I'm talking about the call, and to make my point clear, I'm not even insisting on any specific solution for the events; I'm talking specifically about how this function is + +[23:00] being called. I'm arguing that transfer-with-memo is harmful because you have a function that has four arguments, and downstream, when you're building this call and trying to get user input for it, you basically have two options. One option is to do what you do every other time, for every other function, which is a strict one-to-one mapping of user inputs to function arguments. What we are proposing here with transfer-with-memo is that in this particular case they will need to somehow take an M-address as user input and split it into two parts, or, I don't know, I'm not sure how this is supposed to look. And then when we are talking about rendering, which again presents us with function arguments, we're in exactly the same position: in this particular case we + +[24:00] need to do something about this mapping of arguments that is normally one-to-one. Your code would say: if I see an SC address, I render it as a strkey; but here it says: no, you cannot render this one as a strkey, you actually need to render the first and second arguments together as this M strkey. So I doubt this will work. And honestly, I made this point before: we could say the transaction memo is an implementation detail and just render the destination as an M-address, memo plus destination account. That works on paper if you have a single payment, but we don't do that, because it's very unintuitive and error-prone. + +[25:00] Were you there? Yep. I don't know if George would like to... George is typing some things in here; it sounds like he has some perspective to share. I don't know if he wants to join the stage. + +[26:00] Sure, yeah, I can just reiterate what I put in the chat, which is pretty similar to Dima's points: having separate functions and separate parameters for this just reintroduces the problem at the user layer that you mentioned, Leigh, which is that people forget their memos. By separating it out you're just reintroducing that possible confusion. It seems to me that if we want first-class support for custodial wallets, which are the main user of M-addresses, then you would want first-class support in transfers, and you would want that to also be present in custom tokens. If a token doesn't support M-addresses, then you can't use it with the exchanges, which might be fine, and might be a decision the token makes, but I feel like we would want to encourage that support rather than make it this very distinct opt-in thing where you have to + +[27:00] have a separate function to handle it. You mentioned that people will forget the memo, but
if an exchange is still displaying an M-address and the wallet is receiving that M-address: I mean, if the wallet doesn't know what to do with an M-address, it's going to fail, there's going to be some sort of error; but if a wallet knows what to do with an M-address, it's going to unpack it. So I'm trying to figure out where the forgetting-the-memo piece comes in. You're suggesting we would continue to use the M strkey but not actually propagate it past anything but the UI layer? Correct; that's the layer that's the input into creating a transaction, and that's the layer where the M-address came about to solve the problem. I see. And so the application developer would be responsible for + +[28:00] dissecting the M-address rather than just passing it along. Yep. Now, that's not without a cost, I acknowledge, but we're discussing tradeoffs, and the cost in the solution I'm proposing is just using a new version of the strkey library, because it just maps into an address. Arguably things are a bit more complex on the contract side, though I'm not sure it's necessarily significantly more complex. Could you elaborate on what you mean by "an M-address maps to an SC address"? SCAddress, sorry. So basically, + +[29:00] what I'm proposing is that I want wallets to just keep doing what they are likely doing now: they take the destination as a strkey, and you can just take this strkey, convert it into an SCAddress, and pass it to the call. This is what you have to do now to support Soroban transfers, and the only thing you will need to do to support muxed transfers as well is update the libraries, and things will magically work. It is a lot less work, and a lot fewer chances of doing something weird.
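As a sketch of what "update the strkey library and it magically works" could mean on the client side: an M strkey is just the ed25519 key plus a u64 ID, so a wallet can unpack it in a few lines. This assumes the stellar-strkey Rust crate's current API; the exact type and function names may differ.

```rust
use stellar_strkey::{ed25519, Strkey};

/// Split a pasted destination into (G strkey, optional mux ID).
/// Assumes the `stellar-strkey` crate; exact API may differ.
fn unpack_destination(input: &str) -> Result<(String, Option<u64>), String> {
    match Strkey::from_string(input) {
        // Plain account: pass it through unchanged.
        Ok(Strkey::PublicKeyEd25519(pk)) => Ok((pk.to_string(), None)),
        // Muxed account: the same ed25519 key, plus the embedded u64 ID.
        Ok(Strkey::MuxedAccountEd25519(m)) => {
            Ok((ed25519::PublicKey(m.ed25519).to_string(), Some(m.id)))
        }
        _ => Err(format!("unsupported destination: {input}")),
    }
}
```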
Yeah. What typically happens in most SDKs, and George, you can probably correct me if I'm wrong about this, or if it's not most, is that you give them a string, so you give them the M-address or the G-address, and then the SDK is the + +[30:00] one that builds the muxed account on the classic side, or whatever the XDR types are. Most applications don't build the XDR types themselves. Right, and that's the point. But do we make contract calls with just strings? You still need to know which contract call to make, right? Because your safe default should probably be transfer, but then maybe the safe default should be transfer-with-memo, except that's not always going to succeed. So there is definitely some level of knowledge needed even to build calls just from strings. And I'm not even sure that's the case, because the conversion and building the actual call are different things, and here we are mixing the conversion and the building of the call. Because + +[31:00] currently, for each and every contract call, the conversion is one-to-one: you have two strkeys and an amount, and these are the arguments to transfer. And here you say: well, here are three inputs, two strkeys and an amount, but you need to map this to four arguments. And sure, where should that happen, encapsulated in the SDK, or does the wallet care about it, or whatever client? I don't know, maybe there are good solutions there, but I feel like it's kind of hacky and error-prone. And then, as I pointed out in the discussion, block explorers may display this differently as well, so if you were to check your transactions there, you would be confused. Yeah. So I think these different ideas are optimizing for different things. I think the proposal for transfer-with- + +[32:00] memo is very simple to add to the SDK, and the experience, the knowledge about what you need to know about that extension of a token, is really isolated, so I think there are benefits to developer onboarding for contract development in the Stellar ecosystem because of that. And then I think the idea you're presenting is optimizing for there being one interface, a single interface people need to learn. So people need to know about this stuff, there's more for them to maybe know up front as a contract developer, but once they know that information, it's a single interface. And then an added benefit is that in things like SDKs, at the client level, you do still get that single view of the M-address, whereas with the first proposal you don't get the single view, because clients + +[33:00] need to unpack things. So these two different approaches are optimizing for different things, trading off different things. Something you mentioned before, Dima, is that we haven't really figured out exactly what the SDK would look like and what the experience would be there; assuming we have all the time in the world, maybe we need to actually do a spike there to make sure we don't have any unknowns and that we have a clear vision for what the contract developer experience is going to be. Yeah, sure, I agree a prototype wouldn't hurt, because I want to make sure this makes sense as well. + +[34:00] I think maybe we could discuss the event portion of this. Earlier I listed the four micro-decisions we need to make within this decision, and two of those are around the event. We've talked quite a lot about the input, the transfer function versus the transfer-with-memo function, and whether it should be G plus memo or M on the inputs. For the outputs, I don't think I saw in your proposal that it would be an M-address. So are both proposals consistent that, whatever the event is, it would contain a G and then a separate memo? Yeah, I don't have a strong opinion; we have options, and the tradeoffs are much less clear to me, because we could easily + +[35:00] put M-addresses right there in the event and it would look the same. But the concern is that it will confuse consumers that don't care about multiplexing, which is why we could do a separate, protocol-defined field for the mux, which has the downside that it's a protocol change and it also adds extra bytes to all the events, which is kind of bad. But of course you can just do whatever you would do with transfer-with-memo, by providing ways of getting the mux from the address, and then you can structure it basically any way you want. I think there is no difference between transfer-with-memo and my proposal in the scenario where we all resort to splitting the M-address into two + +[36:00] parts; then we can choose whatever event shape we think is the best tradeoff. The only distinctive thing about my proposal is that we have yet another option of having the mux ID as a parallel field to topics and data.
But other than that, I don't have a strong opinion on what the event should look like. Yeah, he's not here today, but I did run this by him yesterday: based on what he knows from a product perspective, would it make more sense for exchanges or anchors to filter on that M versus a G-address? His opinion was a G-address. I don't think we have a survey of how people typically filter, + +[37:00] but existing exchanges, given that historically there were just a separate G and a memo, are probably filtering on the G-address. Oh yeah, totally. We know the status quo is that most exchanges use G plus memo. But they also don't use the events, which is what we're discussing here, so there may honestly be an opportunity here: if people need to migrate anyway, you can try to promote something new a bit. But I think for non-exchanges, memos are kind of unknown territory to deal with. Right, but exchanges would be using Horizon, and Horizon typically emits, in air quotes, things based on account. So I think that's where the + +[38:00] idea comes from: exchanges currently collect these events, effects, and payments via account, so we should probably just keep doing the same thing and emit events by account, which I think makes sense, because these topics are intended to be things you could filter on. And if I'm running a system that's catering for n events coming in to one G, across all those n muxed IDs, it's difficult from a scaling perspective. How would you use RPC to filter on all of those things? You wouldn't; you'd just have your system ingest everything relating to the G-address and then split it out. Oh, I see Nico is on. Yes, so I guess you were talking about number + +[39:00] two, right, which is: do you want to expose the G plus memo, or the M, in the output, as an event. I think maybe we should first talk about number four in your list, which is: is it a single transfer event, or is it multiple events? Because there are implications in this context. The way I'm thinking about this is that the tradeoff is: if we have separate events, the nice thing is that we keep the transfer event for the standard token contract fairly clean; it's the same thing, we don't mess with it. The problem with the approach where you have two events is that you need to do some sort of + +[40:00] reconciliation on the exchange side. Assuming the exchange requires a memo, but for some reason somebody sends money and forgets to specify the memo, they actually need to reconcile those two event streams in some way, basically to find the transfers that happened that don't have a transfer-with-memo equivalent, and that sounds a bit complicated to do. I mean, it's doable; it's just that you have to do this kind of reconciliation on a per-transaction basis. So I don't know what you all think about this one, because if we can get one event, that's the other consideration. And that, I think, starts the discussion about the + +[41:00] type of problem we have in that context: how do extensions to a standard show up as an event? What do they look like? Right
now we're saying maybe we keep adding topics to the event. The problem I see with doing that, if we're not super careful, is: as you add more extensions, how does that work? The topics are basically indexed, they sit in an array, so if you're only implementing one of those extensions, say extension number 20, does that mean you have to put a bunch of voids in, up to 20? That's the type of thing that happens in that world. And who manages the namespace, the namespacing + +[42:00] here being just a number? There's a bit of that question that pops up in this case. It probably makes sense, and I think you mentioned this, for these to be in the data field, not the topics, and if you turn the data into a map, then you only look for what you care about. I'd avoid extending the topics; definitely the extensions would have their own name of sorts, like maybe here it would say "memos" or something, and that's a string, and that's how you can then have custom data attached to it. Yeah, that's the cleanest option. Leigh, is that what you were thinking too? Yeah, my point was just to move them out of the topics in general. I think in general the memo probably doesn't belong there: + +[43:00] filtering on the memo IDs is getting a little too granular. These topics are meant to be things that you could filter on, that most people are going to want to filter on, because, and maybe George can confirm this, I think RPC and other systems have to index these topics. We've already got four topics; once you start adding more topics, it adds up. Yeah, I agree, I don't think the memo needs to be a topic, and for the same reasons Nico is mentioning, having to add in all these voids: now we've got six topics, and then you get to twenty topics eventually when there are even more extensions; that doesn't create an interface that's intuitive at all, I think. But I think this is one of the nice things about the separate event, the transfer-with-memo event: it's + +[44:00] very explicit. If you're looking at an operation and there's a transfer and a transfer-with-memo, and you don't care about memos, for starters you just don't care about that other event; and if you do care about memos, then you can ingest that one instead for that operation. But like I was saying, though, exchanges care about both, because they need to ingest those transfers with no memos too; that's basically how, through tech support or whatever, they can return the money to the sender. Yeah, so for them there is this other issue, and I feel like this has come up so many times: okay, now we have two events, so they have to figure out how not to double-process them. And I think we shouldn't decline to do this for that + +[45:00] reason; rather, we should solve that problem, or we should have a story for how to solve it. It doesn't necessarily have to be solved today, but it's something we should solve, whether that be through ideas we've talked about, like an invocation ID.
So then you just say: okay, this memo event and this other transfer are in the same invocation, so I don't double-count the other one, or something like that. Yeah, unfortunately I feel like an invocation ID won't help here. There was an unrelated discussion, which is related now, about what happens if a token wants to charge fees during a transfer, for example; then you'll have, for example, two transfer events that are actually separate transfers. In + +[46:00] general, I feel like if we were to come up with a good event-linking mechanism, it would be almost exactly the same thing as converting the data into a map. Because what I think we could do is say: for the event, there is some special linking ID, and you can think of every event with the same linking ID as part of the same logical event. But at this point, honestly, why not just make the data a map? Because I think that's the only real way to prevent double counting: you have an unambiguous way to say that these two events are actually describing the same thing that has happened. So maybe a map really is the right way to go. + +[47:00] Sure. I guess we can discuss it offline; we have maybe ten minutes left. But one of the questions to answer is: do we either use a new event, or make a breaking change to the data field and make it a map. Is that right? Does that sound good? I guess I have a question. I don't know what consumers of those things do, but if it was an extra field at the end of the event that is added, would that really be much of a breaking change? People are just using index zero or whatever, + +[48:00] and it's already an array, right? Whether you're talking about the topics or the data, the data isn't an array right now; it's an integer. That's the breaking change. Yeah, that's a pretty big breaking change. I thought we had a vector already. Originally it was a map, then I think it got changed to a vector, and then I think it got changed to an integer. Okay. So we have that question to answer.
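To make the "data as a map" option concrete, here is a sketch of what emitting such a transfer event could look like in a contract. The key names are illustrative only, and note that the actual SEP-41 transfer event today carries a bare i128 amount as its data.

```rust
use soroban_sdk::{map, symbol_short, Address, Env, IntoVal};

// Sketch: the event's data payload is a map instead of a bare amount,
// so extensions like a destination mux ID can be added without touching
// the indexed topics. Key names here are illustrative, not a standard.
fn emit_transfer(env: &Env, from: &Address, to: &Address, amount: i128, to_mux_id: Option<u64>) {
    let topics = (symbol_short!("transfer"), from.clone(), to.clone());
    let mut data = map![env, (symbol_short!("amount"), amount.into_val(env))];
    if let Some(id) = to_mux_id {
        // Consumers that don't care about muxes simply ignore this key.
        data.set(symbol_short!("to_mux_id"), id.into_val(env));
    }
    env.events().publish(topics, data);
}
```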
And for the transfer-with-memo function + +[49:00] versus Dima's proposal, I think what we want to do is prototype what that looks like in the SDK to see what it looks like; I'm guessing that's the status of that question, which is number three, right? Go ahead. That's a suggestion, yeah, and I think it's a good idea. My suggestion is that if we're leaning towards that approach, we absolutely should spike out what the contract developer experience is, because for something that I suspect will be as intrusive to the developer experience on the contract developer side as this, I don't think we should trade that off for the benefit of clients not having to unpack an M-address. Both of these things obviously have a cost: there's a cost in saying to wallet clients, you have to unpack an M-address to put it into the SDK, + +[50:00] but I think I would prefer that cost over muxed addresses and memos intrusively entering every token implementation. But this is why we should spike it: to see how bad that is and to see what surprises there are. One thing that came up in the chat is that in the past we've been concerned about M-addresses accidentally having balances stored against them, and that's one reason why we wouldn't want to add the M-address to the main Address type: it would be so easy for a developer to store balances against the M, which are then not accessible by the G-address. And I noticed that even with this + +[51:00] proposal that problem could still occur: if I'm understanding correctly, this M-address will be convertible to a Val so that it can be used as an input, and so in theory you'd be able to store it as well. Oh, I mean, the same can be said about transfer generally: you can do all sorts of stupid things in your contract; nothing prevents you from storing random integers in storage for no reason. Yeah, but if something's going to become a thing people shouldn't do, we shouldn't set them up to fail. But also, that's what I wanted to say: if you're concerned about this, you can just make it impossible, to keep people from hurting themselves. You know, I'm not sure we can, with the way the Rust type system is. I mean, obviously + +[52:00] we can do anything we want if we change the Val implementation. No, we can do this at the host level; I'm not saying it's SDK-level, the host can just make it fail. But these types are just a contract type, right; the muxed address is just a Val, so we can say: if your ledger key contains a muxed address, the host will simply not tolerate it. Right, totally, we could do that. If it's anywhere, like if it's inside a Vec, or inside a nested type or something like that; is that what you're saying? Yes. We have similar checks already, like how we make sure you cannot create a Val out of non-representable types and things like that. So we definitely have validations like that, not for storage specifically, but the host is pretty + +[53:00] flexible in terms of restricting types. So really, I was thinking about this as well: if you're concerned, which is a valid concern, about not shooting yourself in the leg, you will still probably be able to extract an integer, and if you really want to, you will be able to store it that way for some reason. But the same can be said about transfer, Leigh: you need to be very intentional about doing something bad, and we can prevent the most obvious mistake, which is just writing a muxed address into storage. That is preventable.
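As a sketch of that "most obvious mistake" and how it could be prevented at the contract or SDK layer, again using the hypothetical `MuxedAddress` from the earlier sketches: storage is always keyed by the underlying address, never by the mux.

```rust
use soroban_sdk::{Address, Env};

// Sketch: always normalize the hypothetical `MuxedAddress` down to its
// underlying `Address` before touching storage, so a balance can never
// be stored under the mux by accident. A host-level check, as discussed
// above, would enforce the same invariant for every contract.
fn credit_balance(env: &Env, to: &MuxedAddress, amount: i128) {
    let key: Address = to.address.clone(); // key by the G/C address only
    let balance: i128 = env.storage().persistent().get(&key).unwrap_or(0);
    env.storage().persistent().set(&key, &(balance + amount));
}
```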
Okay. So that was decision number three; we still have to figure that out. + +[54:00] It feels like we're discussing a lot of complexity just to improve that one little thing at the client. Okay, then for decision two, I think we agreed that the output should have G plus memo, instead of just the M. And I guess one is tied to three, the G-plus-memo versus M question for the input, so that's something we still need to answer. We're running out of time; are there any other open questions? Sounds like we still have some stuff to discuss. + +[55:00] Yes, I guess the question on the single event or not, that's number four. Going back to this one: it's potentially a breaking change for the downstream systems that are ingesting, but I don't necessarily think that, as a breaking change, it's insurmountable; it's basically the kind of thing you do in many situations. We would have to allow, in the token standard, that it's either a single amount or something else, and then downstream you dynamically do something different + +[56:00] based on the type. You're talking about making the data field a map, right? If it was a map or a vector? Yeah, we've discussed this a bit internally; it is a breaking change, but there doesn't seem to be much opposition to it, and we can bring it back up if it helps in this case. I think the way we would do this, if this memo thing is a separate SEP and an extension, is we'd make a change to SEP-41 as well as the extension. Ideally we'd change SEP-41 so that it says the field could be an amount or a map that contains an amount, so that + +[57:00] folks who are developing don't feel like they need to use just the amount for non-memo transfers. The amount as an integer definitely has to remain part of the standard; we cannot deprecate it, because the historical events already have it. We wouldn't remove it; we'd just say that both are allowed. All right, we're out of time. If there are any other questions or concerns, continue adding them to the chat and we can address them, and we'll continue any open questions in the GitHub discussions. All right, thank you. + +
diff --git a/meetings/2025-02-27.mdx b/meetings/2025-02-27.mdx new file mode 100644 index 0000000000..05b06dfc08 --- /dev/null +++ b/meetings/2025-02-27.mdx @@ -0,0 +1,285 @@ +--- +title: "DeFi Security Review and Upcoming Soroban Protocol Improvements" +description: "Discussion of the Blend V2 security competition and formal verification tooling, followed by protocol-level updates covering Soroban in-memory state accounting and muxed account memo support." +authors: + - alex-cordeiro + - alex-mootz + - armen-ter-avetisyan + - carsten-jacobsen + - chandrakana-nandi + - dmytro-kozhevin + - garand-tyson + - george-kudrayvtsev + - leigh-mcculloch + - nicolas-barry + - siddharth-suresh + - simon-chow +tags: + - developer + - CAP-64 + - CAP-65 + - CAP-66 + - CAP-67 +--- + +import YouTube from "@site/src/components/YouTube"; + +## Blend V2 Security Competition and Formal Verification Tooling Walkthrough {#part-1} + + + +This session focused on the upcoming Blend V2 release and the launch of a combined security audit and formal verification competition. The Blend team walked through the protocol architecture, contract layout, and testing strategies, with a particular emphasis on how external contributors can effectively find and prove issues. + +A large portion of the discussion covered practical guidance for auditors and formal verification participants, including how Blend structures Soroban contracts, how authentication and logic are separated for testing, and how Certora tooling (Sunbeam/Cavalier) is used to specify and verify protocol properties. + +### Key Topics + +- Overview of Blend as a universal liquidity and lending protocol on Soroban +- Core contracts and responsibilities: + - Emitter contract and protocol token configuration + - Backstop module for insurance, bad-debt absorption, and pool control + - Pool factory validation and pool contract behavior +- Blend V2 testing approach: + - Separation of authentication from core logic for unit testing + - Use of integration test fixtures to simulate real protocol usage + - Debugging strategies using Rust tooling and mock contracts +- Formal verification scope and workflow: + - Focus on the backstop contract for tractable verification + - Writing Cavalier rules using `assume`, `assert`, and nondeterminism + - Use of Rust features and summaries to modularize verification + - Running Sunbeam prover jobs and interpreting counterexamples +- Fee vault (ERC-4626–style) add-on contract: + - Fixed-rate and capped-rate yield strategies + - Heavy reliance on integration tests with deployed Blend contracts + +### Resources + +- [Blend](https://www.blend.capital) +- [Blend V2 Audit and Certora Formal Verification Competition](https://code4rena.com/audits/2025-02-blend-v2-audit-certora-formal-verification) +- Certora team blog posts: + - [Formal Verification Advantages](https://www.certora.com/blog/catch-tricky-bugs-in-less-time-using-formal-verification) + - [Stellar Audit Contests](https://www.certora.com/blog/bringing-formal-verification-to-rust) + - [Sunbeam WebAssembly Reports](https://www.certora.com/blog/formally-verifying-webassembly) + +
+ Video Transcript + +[00:00] Hello everyone, and welcome to this week's Stellar Developer Meeting. Today we have the team from Blend joining us for a code walkthrough in support of their exciting new security initiative for the Stellar ecosystem. I'm Carsten, as usual, from the SDF Dev team, and Alex will join us from Script3, and Armen from Certora. To set the stage: Blend is getting ready to launch Blend V2, and in preparation for that they're opening up the code for a groundbreaking $125,000 USDC competition focused on securing Blend V2 through both a competitive audit and formal verification. This is the first ever open competitive audit and formal verification competition within the + +[01:00] Stellar ecosystem, so it's an amazing opportunity for Stellar devs to try their hand at competing to find bugs, write formal verification rules, and just get more familiar with Blend in general. I'll post a link for how to sign up for the competition; it started this Monday, so it is open. But let me now introduce Alex. Hello, thanks for having me. Welcome, Alex; just let me know when you are ready to get running. Go ahead, you're on. All right, perfect, you have my screen. All right, thank you. So yeah, I am Alex, you might see me online as mots2, and I'm the lead developer at Script3, and we developed Blend, which is a universal liquidity protocol primitive. A quick overview of what we're going to run through, and we might go a wee bit fast because there's a lot of stuff to cover: for those who aren't familiar, I'll give you a quick + +[02:00] breakdown of what Blend is and how the contracts all work together, and then I'll run into the code. The main thing I'll try to focus on, for those doing the audit competition, is how we test Blend, and specifically, if there is some kind of bug or issue you've found within Blend V2, some tips and tricks to make it easier to prove that something is in fact wrong. So yeah, Blend is a universal liquidity protocol primitive. What does that mean? Basically, Blend is a tool that allows companies, crypto natives, really any entity that wants a lending market, to go ahead and have one. Lending markets in crypto are just places where people can lend and borrow assets from each other, and one thing Blend does is come with an automatic backstop module, which acts as insurance: bad-debt protection and also scam protection. These + +[03:00] are funded by anyone, and the way this all works together is that the pool gives some portion of the interest it generates out to the backstop module, such that it can provide that layer of insurance for the protocol. That's just a really brief intro to what Blend does. I want to touch first on how the contracts all work together, since if you're looking at this from a security perspective, knowing how the main components of Blend interact is pretty vital. The cornerstone of Blend is the emitter contract. It doesn't have very many responsibilities; its core one is to define the admin protocol token, in this case BLND, and secondly it's responsible for defining the backstop module, which, like I said earlier, is the contract that holds all of the insurance funds for the Blend pools. The insurance, + +[04:00] in this case, is BLND:USDC LP tokens. So there is an AMM; for those familiar with EVM, it's basically Balancer.
But it's just an LP token for BLND:USDC. The backstop module's main responsibility is that it actually controls the state of the pool: if you have a lending market, the backstop module has the ability to turn the pool on and off. So if something happens to a pool, or it gets exploited, or some oracle goes wrong, the backstop module has the ability to disable borrowing for the pool. Secondly, it's also the first-loss capital in the event of any bad debt; for those that might not know, bad debt is the case where a user has liabilities and no collateral to cover them, so the protocol has to take a loss on that debt. It also manages the BLND emissions: starting from the emitter, BLND tokens + +[05:00] get sent to the backstop module, and it's the backstop module's responsibility to send them to all of the pools within the Blend ecosystem. The backstop module also defines a pool factory. This thing is pretty simple: it's a contract that contains a pool WASM, and for any pool that's created, this is the thing that validates that it is in fact a Blend pool and is the exact contract we expect to be deployed. And then the meat and potatoes of the protocol is the actual pool. This is the core of the whole protocol; it is the place where people can go and lend and borrow reserves. There are tons of different ways you can set things up, but this is the place where all the lending and borrowing occurs, and where most user interaction will end up taking place. I know that was really fast, + +[06:00] but hopefully, this is being recorded, right, Carsten? I think it is; I'll take that as a yes. Yep, it's recorded. Perfect. So we can go back and watch that over if anything was missed, and I'm also available on Discord if you want to ask further questions. But we will jump into the actual code, since that's probably what a lot of you are interested in. I'll give a quick breakdown of how the contracts exist in Soroban, and then we can start digging through how our code is actually tested. For those that aren't familiar, the way we have set up our Soroban repository is that all of the core contracts I talked about earlier are located within their own folder: for example, all the code for the backstop is within the backstop folder, the pool within the pool folder, the pool factory in its own folder, etc. + +[07:00]
So everything in here is exposed for anyone on the blockchain to interact with the way we structured things is we generally do all of the authentication, that's required to use the contract here. And then we actually perform all of the logic in a second helper function so, that we can better unit test the code as a whole. So you'll notice. If you're ever looking for authentication things you'll notice, that all of the O actually takes place here. If you are from the EVM world o how Soroban does o is probably one of the largest differences between the two blockchains. So it is worth looking into it's incredibly well documented on sorond Sor bonds documentation. So I would highly recommend reading through how, that works. So you and + +[09:00] then you'll also notice, that for example. If I go to where we actually Define the logic this is pretty common throughout all of our contracts. So we'll basically try and wrap all of the logic, that's external to authentication and stuff within a function and we actually go through in write unit tests specifically for those functions. So this is a pretty good place to look. If you think something might be incorrect or wrong or a bug exists somewhere this is probably one of the easiest places to start testing, that and attempting to find whether or not something's wrong and you'll often find, that this is like kind of where most of our unit tests actually occur secondly one thing, that we do include within this blend contracts repository is there's mock contracts and there's also a test Suites folder. So one thing, that's really nice about interacting with soron is as I'm using tests you'll notice I have rust analyzer installed on + +[10:00] my fsco. But you have the ability to step through debug so. If you're ever curious about what actual values are within a function you're you can use all the rust internal tools to step through and actually debug it. If you're like most developers in the world you're just going to add a whole bunch of print lines. And then run the test, that also works fantastic too. But it's really nice for example. If you do think something's wrong make sure to use the rust tools and use all the debugging tools to your advantage we've been able to find a lot of things internally using those strategies. But one really important thing I wanted to touch on especially for L contracts is we have this giant test Suites folder we use a lot of this for all of the integration testing. So for example there is a test fixture this basically just contains a whole it's basically going to deploy from a fresh environment a whole bunch of contracts and things, that exist + +[11:00] to basically support a pretty standard blend pool. So you'll notice it's going to make tokens it adds a whole bunch of users it deploys all the contracts out for you and you can basically. Then start from a really good starting point to actually write an integration level test one quick thing to note here is, that. If you ever see an our test the user Bombadil I'm a bit of a Lord of the Rings nerd. So he is the admin and ruler of everything. So he has the power to basically mint tokens at will he's kind of the admin the super key for roughly all of our tests as well. But how this is actually implemented you'll actually notice. If you look back at some of our older audits we've written integration tests after basically fixing issues, that have been found one of these was an inflation attack, that I believe C Tor found in the first audit. 
But for example this kind of walks you through a really good way + +[12:00] if you think something does exist, that might have an issue writing one of these level tests is a really good way to actually prove whether or not it exists since this kind of is what at this level of test this really is the place where you're going to have the same level of interaction, that someone using the blockchain would. Then yeah maybe the last thing to note before I switch over to the fev VA here is you'll notice on the test fixture. When you cre the test picture there's a true false just. So it's clear you have the ability. When running rust integration tests to actually deploy the contracts and use WMS so. If you want to use the actual wasm files, that are being deployed, that's great we do, that in plenty of tests but. If you do, that rust itself won't actually be able to debug the code within the wasin packages so. If you're expecting some of the prit lines or line by line de debugging to + +[13:00] work you will need to deploy all of the contracts as rust crates, that way you can use all the unit testing tools. So yeah, that's probably all I'm going to touch on here again within test Suites and test this is tons of really good examples on setting up all different kinds of environments. If you want to go through and attempt to prove, that something is incorrect lastly I'm going to also run through the Fe Vault contract this is basically a add-on contract, that's aimed at wallets, that will allow users to specify either a fixed rate of return or excuse me a CAP rate of return. So for example. If a wallet wanted to offer say 10% on USDC deposits using blend they can deploy one of these Fe volts. If the blend pools returning more than 10% they would keep, that extra + +[14:00] say it's returning 15% they would keep 5% 10% would go to the users or they can just take a fixed interest take off of all of the interest generated for deposits through this Fe volt. So it works pretty similar similarly to any er ERC 4626 function or 4626 Vault token. So those, that are familiar or I've looked at those kinds of protocols in the past this should feel somewhat familiar to you I did want to call out one thing you'll also notice some of the implementation patterns we use in blend also come over here. So the only thing, that's maybe a bit different is this contracts a bit simpler. So you'll notice we've kind of there's probably one less layer of abstraction there for you to have to deal with most things are documented pretty well in code but. If you find anything please feel free to call it out you'll notice, that in the test + +[15:00] here it's set up a bit differently. So for example this is a very long test. But we mainly are testing the fee VA through integration testing since the contct is a bit similar simpler and it relies heavily on the blend contracts themselves. So you'll notice, that we support a blend rust crate, that includes all of the blend contracts themselves this has been updated for the exact hash, that was pushed for the Auto competition. So you'll notice, that I believe yeah the version is here. But this comes with a very similar deployment script as the one we use within the blend contracts repository for their integration test. So let's see. If yeah there's a blend flixer deploy function, that will actually go ahead and create all of the + +[16:00] blend contracts needed to interact with a very similar setup as to what blend view1 main it is so. 
If you do run into anything with the F VA this is kind of a great place to look to go ahead and try and prove, that something INF fact is missing or has a bug. So yeah, that's probably all I wanted to touch on I wanted to make sure I left a little bit of time for Sor to go through the formal verification portion of the auto competition. But I'll probably leave it there unless there's anything else you want me to touch on Carson great let's wait and see. If there's any questions at the end. But thank you for the presentation I'm going to invite Shandon Armen on the stage welcome both of you hi Armen are you there okay yep hello you have my screen + +[17:00] so this is the contest repo. So trra you can let me know yeah we should walk over sure yeah you can share and I'll just go over it. So as Alex mentioned in his presentation before you know we are doing this audit and formal verification contest for blend and. While the audit itself you can you know all of the prog the code bases in scope for the verification part we have a smaller scope. Because the protocol is there's a lot of code. And so we decided, that it would be more tractable and manageable. If we tried to focus on one particular part and in particular we are looking at the back stop crate so. If you go up a little bit in this repository. So for what it's worth this is the repository where all the code is. So you can clone this repository. And then you can check out all the different crates and + +[18:00] in particular the one, that we are talking about is the back stop as you can see Armen has it here and I want to show the Sora specific stuff. So first of all the properties, that you're going to be writing are going to be in the seras specs directory where Armen is right. Now I think I have a couple of little. So well. Now can you go maybe look at some of the other files there's a few, that I provided just for an example yeah. So here's a few examples of things, that you just to give you an idea for what these look like. And so you can notice here, that there is this thing called cvlr uncore assert in all the rules. And so the things, that are so, that's one thing I want to talk about a little bit so. If you look at line number five this is the we call it Cavalier. So cavaliere is the specification language, that we've built it's just a rust Library. So you can import the library. And then you can use the various + +[19:00] assertions and there, which are just macros, that we've defined. So assume assert and satisfy I think are probably the most common ones and the ones, that are I think we recommend using the most. If you need something more specific. Then you can always reach out and we can help you figure out how to do, that the other thing worth noting here is can you can Arman can you go down to the cargo Tomo file at the bottom yes. So we are also relying on rust features. So for example here you can see on line 17 there's this feature called Cur and the nice thing about this mechanism is it actually lets you use this feature to selectively compile the code. So for example you can. If you go back Armen to let's + +[20:00] see let's go in the back stop yeah and maybe in the withdrawal maybe go to deposit actually okay perfect so. If you look at the top of this file you'll see, that we are using this feature called sora and essentially what this is saying is, that. When the feature is enabled and I just showed you before, that in the cargo TL it is enabled. 
Then use this mock implementation of the token client and otherwise you can use the whatever default one, that blend was using and this is just for verification only right so. When you're compiling your code for other purposes. Then you shouldn't be using this but. When you're doing verification this is helpful. Because a lot of the times we want to modularize things. And so you may not need to include all the code for the token. When you're really just verifying the code for the back stop right so, that's an interesting feature, that I think you'll probably use quite a bit and you know always feel free to reach out on + +[21:00] Discord. If you have any questions the other thing I also want to highlight is the con files. So on the left you will see, that there is this directory called conss and here we have provided some setup and some skeleton for some basic conss. So let's look at the one yeah the withdraw one is a good one. So what's happening here is we have this file, that has a build script this is already provided you probably should not have to modify this at all it's just a python script, that does a bunch of stuff, that is necessary for making sure, that the reports look nice you know you have all the files uploaded there it calls the right build instruction in this case we're using this file called this build system called just. But other than you should not have to modify, that more or less. Then there's a few verification specific things. So for example there's this thing called we have this slide called optimistic Loop we don't need to worry + +[22:00] about what, that is I would leave, that there's also this precise bitwise Ops flag, which is set to True what this is saying is, that. When you're doing the verification use bit vectors you may or may not want it's you can. If you're facing like slow running times I recommend taking it out and seeing what happens and again. If you run into any issues. When you take it out you can reach out and we'll try to help you. And then really the main thing you will have to modify is this rules field. So whatever rules you write you just have to add them to this file. And then you just run this San with this configuration file I one thing I would recommend. So it is very common for users to run the prover with many ruls and, that is what you will do in the end but. When you're debugging and. When you're just you know trying to test things out it is often helpful to just + +[23:00] run one rule at a time. So you know, which rule is problematic or you know, which has a Vu problem or all kinds of stuff right. So definitely keep, that in mind. When you're just getting started and you know writing these as I can tell you there's a lot of excitement already on Discord lots of people have been asking lots of questions really good questions. So it's already really nice to see the engagement from the community so, that's, that's really cool. So I think let me just think what else I would like to show actually yes can you Armen. If you click on the documentation page, that you have open. So yeah here we have some documentation for using Sunbeam. So there's the installation guide, which I think you probably don't need to worry too much about. If you. If you have installed ctra CLI you should be good to go. So I don't think you need anything else. So the user guide is the main thing you know we have some examples of you know what the + +[24:00] different components are. 
So for example here you know you see this like hash rule thing on top of these functions. So really what's Happening Here is you're just writing rust functions right. So Cura is like interpreting those functions separately especially for verification. But from your point of view you're just writing rust functions with special macros and special annotation so, that's one of the annotations is this rule attribute and I already mentioned the CVL assume assert macros from the Cavalier spec Library another thing, that is very useful is this notion of non-determinism. So oftentimes what happens is. When you're doing verification you might want to sort of summarize some piece of code. Because you know maybe, that's not super relevant for the property you're verifying and all you care about is, that function returns some + +[25:00] non-deterministic value right it could be a u64 it could be i1 128 whatever it could be a user defined type all kinds of Stu. So this nonb gives you a mechanism to do, that. So Cavalier actually already implements a bunch of non-ets for various primitive types blend has a lot of userdefined structs right. So for example there's the Q4 withdrawal struct you might want to implement, that trade for this truck right. So you can do it this way. So right like as there's an example here. So essentially all you do is you know you assign non- debt to all the fields of the struct and you're good to go. So this is something, that's really helpful for verification we often have to do it and I'm imagining other people will have to do this as well we have some very basic examples in the tutorial. So we have a Sunbeam tutorial over here it gives an example of the token + +[26:00] contract so. If you've never done verification I think this is a good way to start you can just do the exercises here and you know just try to understand like what's really happening. When you run the tool. And so on and other than, that. If you go back I think I already explained the scripts the build script stuff. If you scroll down a little bit more Armen yeah I think they these are already covered say. So I think the main other thing I would recommend looking at is what happens. When you do run Sunbeam. So as I mentioned before and it's actually also listed in the readme for the repo all you do is you just run Cur San proofer and you pass this Con file and you run it and all you get is a link to a run right and this link is going to look something like this right. So here you can see well. So here there is no violation. So you're fine. But + +[27:00] if there is a violation you can see a call Trace the call Trace is not super easy to read so. If you are stuck with problems with the call Trace please post messages on Discord and we'll try to help you as much as possible you can also see the code right. So for example here on the left you have this files you can see all the files, that you used to run verification all the properties, that you ran all the code, that you changed or properties you wrote everything is here. So and I would always encourage you to. If you are having any problems you can just send me a link to a run. And then I can take a look at the run I can reproduce the Run locally and help you debug your problems other than, that I'm not thinking what else would be useful I think about, that's, that might be all I have like oh actually I would like to mention one other thing. 
Oh, actually, I would like to mention one other thing. We already saw the Certora + +[28:00] feature mechanism, where you can selectively decide which functions to include when you're compiling, and we also saw the nondet macro. Another thing you may have to do is summarize various functions. The best way to do this is the following: in the certora specs directory there is a directory called summaries, and the mod file is where you expose all the summaries you have written. For example, there's a file called emissions, and depending on the property you're proving, you might have to summarize it. You can say that you don't really care what emissions does as long as it returns a non-deterministic value, or you can say that it should be a non-deterministic value + +[29:00] constrained to some range. It really depends on your understanding of the protocol and on what you think the caller of this function needs to know about its behavior. This is often very helpful: you can put all your summaries here, and then wherever you're calling the function, instead of calling the original function, you call the summary. It's also good to annotate it accordingly, because then you know you're not really changing the semantics of the original program. Great, I think we need to wrap it up here, but thank you so much for this presentation. I think this was very helpful for everyone who's going to participate in the competition, getting a walkthrough of both the code and the verification. Thank you all for joining. I didn't see any questions in the chat, but feel free to reach out on Discord if there's anything; I'll post the link for the competition again in Discord. Good luck to everyone; + +[30:00] the competition is running until March 17th, so you still have plenty of time to dig into it. Thank you everyone for joining, thank you Shandra and Alex for joining us here, thanks everybody.
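As a concrete illustration of the summarization pattern described in the talk above, here is a minimal sketch; the module layout and the `emissions` example follow the talk, but the function shape and its constraint are invented for illustration.

```rust
// Hypothetical summary module (e.g. exposed from a `summaries` directory
// via its mod file). Callers invoke this stand-in instead of the real
// emissions logic when verifying.
use cvlr::{cvlr_assume, nondet::nondet};

/// Summary: "I don't care what the emissions calculation does, as long
/// as it returns a non-deterministic value in a sane range."
pub fn claim_emissions_summary() -> i128 {
    let amount: i128 = nondet();
    // Optional: constrain the value if the property being proved needs it.
    cvlr_assume!(amount >= 0);
    amount
}
```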
+ +## Muxed Account Memo Support and In-Memory Read Resource Updates {#part-2} + + + +This protocol-focused discussion covered updates to Soroban state accounting and ongoing design work around muxed accounts and memos. The main goal was to ensure Soroban can safely scale in-memory state usage while remaining compatible with existing Stellar patterns such as exchange custody and multiplexing. + +The group also reviewed design options for representing muxed accounts and memos in events, with attention to developer ergonomics, backward compatibility, and downstream consumers like Horizon and indexers. + +### Key Topics + +- In-memory Soroban state accounting: + - Shift from disk-based bucket list sizing to live Soroban state sizing + - Separate treatment of contract data vs instantiated WASM modules + - Adjustments to rent fees to reflect in-memory costs + - Flat per-kilobyte write fees decoupled from live state size +- Contract code size considerations: + - Accounting for expanded in-memory size of instantiated WASM + - Using on-disk WASM size for limits and write fees to preserve compatibility +- Muxed account and memo handling: + - Rationale for supporting muxed accounts in Soroban tokens + - SDK-level abstraction over regular and muxed addresses + - Evaluation of event representation options for muxed destinations + - Tradeoffs between consistency, event size, and consumer breakage +- CAP alignment and cleanup: + - Confirmation that CAP-64 is no longer needed + - Continued discussion and iteration on CAP-65, CAP-66, and CAP-67–related changes + +### Resources + +- [CAP-0065](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md) +- [CAP-0066](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) — [Discussion](https://github.com/stellar/stellar-protocol/discussions/1585) +- [CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) — [Discussion](https://github.com/stellar/stellar-protocol/discussions/1553) +- CAP-0067 Comments: + - [Dima](https://github.com/stellar/stellar-protocol/discussions/1553#discussioncomment-12306846) + - [Leigh](https://github.com/stellar/stellar-protocol/discussions/1553#discussioncomment-12309408) +
+ Video Transcript + +[00:00] In Protocol 23 we are introducing the hot archive bucket list, where we will store archived entries in a separate database. What this allows us to do is store all live Soroban state in memory, as well as the Wasm module caches, and we can do this safely because of the rent system and state archival. What we talked about today is an update we've made since the last time we spoke, particularly to the way we calculate what today is called the target bucket list size. In Protocol 22, whenever you pay rent or write an entry to the ledger, the fee is variable based on the current size of the bucket list, which is just the size of the network's database on disk. Essentially we have a target size, and if this target size is exceeded, then the cost of writes and rent bumps increases + +[01:00] very rapidly. This gives us a soft cap on the current size of the bucket list, the network database, from a fee perspective: you can technically still write, but it will be very expensive to do so, so people will stop writing, and then eviction and state archival provide back pressure that reduces the size of the bucket list over time. Now, prior to Protocol 23 this was all measured in database size, and there were two issues with that. The first is that this is a Soroban-based fee, yet most of the core database wasn't Soroban state: classic state significantly dominated the size. So we had this weird system where, even though Soroban took up very little space in the bucket list, it was being charged fees whenever classic entries + +[02:00] made changes. That's issue number one: classic influencing the cost of Soroban state. The second issue is that with Protocol 23 we now store all of the Soroban state in memory instead of on disk. Our goal with this setting is to make sure the protocol can cap the maximum amount of memory a validator has to use at a given time, and if we continued to use just the disk-based metric, it would no longer actually cap the amount of memory you need, now that we cache everything in memory. So the update to [CAP-66](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) is that we change the bucket-list target size (in bytes) to a Soroban live-state target size. What this means is that instead of using the entire size of the database to calculate rent fees and write fees, we only use the + +[03:00] size of the live Soroban state that we actually have to store in memory. There are a couple of details here; in particular I want to talk about contract code. The plan is to store all contract data and all TTL entries in memory, and to store instantiated contract code in memory. For contract data and TTL entries, the thing we store is the same size as the entry on disk: if a contract data ledger entry is 64 bytes on disk, then we store 64 bytes in memory, a one-to-one ratio. For contract code this is not the case. Because of the instantiated module cache, we are not actually storing just the contract code bytes in memory.
We're storing an instantiated module, and this module can be up to 40 times the size of the on-disk + +[04:00] Wasm bytes in the worst case. So what we're doing instead, for contract code in particular, is that instead of using the size of the contract code entry for fees, which is what we've been doing up to this point, we're going to use the size of the memory taken up by the instantiated contract code module. In the worst case this is a 40x increase, but in the average case it's only a 10-to-15x increase over the size of the contract code as it's calculated today. So that's change number one: because we now need to account for the in-memory size of state instead of the on-disk size, we need to make this adjustment to the contract code size calculation. The other adjustment we're making is to rent fees. Under Protocol 23, paying rent is essentially renting space in the in- + +[05:00] memory cache of the network; that's what rent is doing now. The rent fee will remain as it is: we still have a target live-state size, and the rent fee will increase very rapidly if the live state exceeds it. For write fees, though, it doesn't really make sense for the fee to be a function of the current amount of live state: since we're using memory-based bounds instead of disk-based bounds on the live state, a dynamic write fee doesn't make sense. What we're proposing is that we still need a write fee, but it doesn't need to be based on the current size of the bucket list; it just needs to be based on the computational cost of doing a write while applying the transaction. So we're making a + +[06:00] change so that write fees are now a flat fee per byte, or per kilobyte rather. Instead of having a curve, which rent fees will still have, write fees become a network config setting: a fee per kilobyte written. This is applied based on the size of the entry you're writing, and it applies both to Soroban entries and to classic entries. So those are the primary changes: we're changing the way we calculate contract Wasm size with respect to fees, rent fees are still variable based on the amount of live state, and write fees are now flat. Any questions or concerns? + +[07:00] On Morgan's question about cheap storage: it's not so much cheap versus not. Before you hit the target, storage is reasonably priced; past the target, it is ridiculously priced. Even before you hit the target we still charge fees, and the intention of those fees is to dissuade people who aren't legitimate apps from using the space. But the reason we have the target is that we don't want a DoS angle where an attacker could write gigabytes and gigabytes of state that we'd all have to store in memory, and then OOM-kill nodes. So I don't think the distinction is cheap versus non-cheap; it's reasonable pricing versus the ridiculous pricing we only + +[08:00] resort to in order to protect ourselves from malicious attack. Do we have any numbers for the threshold? I think exact numbers are TBD.
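As a rough illustration of the fee split just described, here is a toy Rust model. All names and constants are hypothetical (the real settings and curve live in CAP-66 and stellar-core), and contract code sizing is simplified to a plain multiplier.

```rust
// Toy model of the proposed changes (not the stellar-core implementation).
// Two ideas: contract code is accounted at its in-memory (instantiated)
// size for rent, and rent stays a steep function of live Soroban state
// versus a target, while writes become a flat per-kilobyte charge.

struct FeeSettings {
    live_state_target_bytes: u64, // soft cap on in-memory Soroban state
    rent_base_fee_per_kb: u64,    // baseline rent rate below the target
    flat_write_fee_per_kb: u64,   // new network setting for writes
}

/// Size used for rent accounting: the instantiated module is roughly
/// 10-40x the Wasm bytes under the current interpreter (protocol
/// dependent; in reality the size comes from the module cache).
fn code_size_for_rent(wasm_bytes: u64, instantiation_factor: u64) -> u64 {
    wasm_bytes * instantiation_factor
}

/// Rent: reasonably priced below the target, "ridiculously" priced above
/// it. The real curve is defined in CAP-66; the 16x step is illustrative.
fn rent_fee(s: &FeeSettings, live_state_bytes: u64, rented_kb: u64) -> u64 {
    let rate = if live_state_bytes <= s.live_state_target_bytes {
        s.rent_base_fee_per_kb
    } else {
        s.rent_base_fee_per_kb * 16
    };
    rate * rented_kb
}

/// Writes: flat per kilobyte of on-disk entry size, independent of the
/// live state, applied to Soroban and classic entries alike.
fn write_fee(s: &FeeSettings, entry_size_bytes: u64) -> u64 {
    s.flat_write_fee_per_kb * entry_size_bytes.div_ceil(1024)
}
```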
For reference, currently we use the bucket list total size, and I think it's around twelve and a half gigs, with the target somewhere in that range. But because we're switching to metering only the live Soroban state, the new target will probably be something on the scale of hundreds of megabytes, just because today the total Soroban + +[09:00] state size is only about 40 or 50 megabytes. That still gives us lots of growing room while being a very small memory requirement. But again, we still need to think about these numbers a bit more; that's the range we're looking at. Yeah, to add to that: since Wasm now takes more space than it used to take in the database, we need to re-evaluate this in-memory state size, including the module cache, and I don't think we have done that yet. But I don't think it will be significantly bigger than tens or hundreds of megabytes, and the threshold should be set quite high compared to that; it can easily be a few times higher. We + +[10:00] didn't get that much data in a year, so if you set it to 2x or 3x of the current state, I don't think we'll run out of space anytime soon. And just to make it clear, for Morgan's question: the purpose of these limits is not to limit good users. If we see a dApp with 100K daily active users, that's not going to happen overnight; there will be a ramp-up, and we would introduce an SLP to raise those limits. The intention is not to reduce actual good usage of the network. + +[11:00] And Nico pointed out a good point that I forgot to mention: because we are switching from on-disk metering of contract code to in-memory metering, the size of the instantiated module is protocol dependent. For instance, if in the future we switch to a JIT instead of an interpreter, then the size of the instantiated module will increase. I didn't mention this, but while for the rent fee in particular we are using the in-memory size of the contract code, for limits and for the write fee we use the on-disk size. We have to do this because, for instance, we have a maximum contract size config setting, and if a contract today has the maximum size, we wouldn't want to break that contract in a future + +[12:00] protocol upgrade just because the in-memory size happened to change. So that's a good point I forgot to mention: only the rent fee depends on the contract's in-memory size, and the target Soroban live-state size is also calculated using the in-memory size of contract code; but write fees, transaction write limits, and contract code size limits are still determined by the Wasm, because only the actual on-disk Wasm size is consistent from protocol to protocol. Actually, my comment was a little bit different: I had a chat with Garand about this, and it sounds like the current PR we have + +[13:00] for [CAP-65](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md) is actually caching per host. Core can be configured with multiple hosts to support multiple protocol versions, and right
now we keep the pre-processed version of the Wasm in memory per host. So if you have two hosts, memory holds two versions of the Wasm cache per contract. That makes it easier to reason about, but at the same time it means the memory overhead actually depends on how core is compiled. Yeah, I agree this is something to look out for, but I think it's primarily an implementation detail at this point. No, it totally is; it's just + +[14:00] that's why I said there needs to be an SLP to discuss this, because the memory limit you pick has to take into account the fact that you have an overhead of two or three times for the resident Wasm, which is maybe not as trivial as it looks from calibration. If you ask one host how much memory it's using right now, that's not the whole truth: you need to account, through the network settings, for the fact that in-memory Wasm carries, say, a 3x multiplier on top of what a single host is actually using. Does that make sense? Yeah, the calibration settings. I think + +[15:00] long-term, when we actually have large amounts of state, say in the gigabytes, we can probably avoid this issue by doing something clever: if we're armed for an upgrade, we can do the compilation for the new protocol version in the background, lazily, and then serialize it so we don't have the double memory overhead. But as far as the short-term and medium-term plan goes, the state size is still small enough that we can set limits with these assumptions and be fine. I agree we should keep this 3x factor in mind. Cool; if we don't have any other questions or comments about this, I'll hand it off to the next + +[16:00] CAP. Right, thanks Garand for presenting this. The next thing we have on our agenda today is [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md), specifically the issues that concern the handling of transaction memos and muxed accounts in events. I'm not sure the CAP is even the appropriate link to share right now, because most of the relevant material is captured in the discussions on the CAP. The point of contention during the previous meeting was how exactly we can + +[17:00] enable muxed accounts in Soroban. After spiking a few options, and considering the fact that it is likely for pretty much any custom token to be interested in being listed on centralized exchanges, and thus in being compatible with the custodial wallets that centralized exchanges may use for multiplexing, it seems like this is something most tokens will want, and I think this is one of the key requirements that made the whole discussion hard to converge on. But since we came to an agreement that this is probably a reasonable feature to have, and contracts will need to deal with muxed addresses anyway, the current preference is to + +[18:00] go with the option of adding seamless support for muxed addresses to the SDK, and to have tokens, well, not extend,
but update the transfer function interface to be able to accept muxed addresses. I don't know if you want to add anything before we dig into details; you asked to speak. Yeah, I think it's worth calling out, for people listening, an assumption we were making in the conversation last week: that this needed to be an extension. As was just pointed out, it's safe to assume that pretty much every token would be interested in being listed on an exchange if that were to happen, so there's + +[19:00] little reason to make this an extension targeting a very small number of tokens; really, all tokens should be able to handle the case where they're given a muxed address or a memo. And much of the complexity in some of the spikes disappears once we don't try to make this an extension. Right. So to summarize what we are currently going to do in the protocol: we will add a new object type to Soroban, specifically for handling muxed addresses — muxed accounts currently, though I'm actually not sure if we want to do contracts or not; we could if we wanted to. The interesting thing about it is that since it's in the + +[20:00] protocol, even though the primary reason for this work is for the tokens to be able to support multiplexing, basically any protocol will be able to use these addresses if it wants to. So if custodial solutions are necessary for some other protocols — solutions where a single address can have multiple virtual sub-accounts — they will be able to implement this. I think this is one of the benefits that hasn't been discussed before; even though we don't have an obvious use case beyond tokens, folks may come up with something. I definitely recall having seen some discussions on Discord regarding virtual accounts for multiplexing support. + +[21:00] So we have this new object type, and also, at the SDK level, we make it so the regular address type and this new multiplexed address type are wrapped in the same SDK type. That means that if a contract just operates on addresses, it doesn't need to know whether your token has started accepting multiplexed addresses for transfers. For example, if contract A calls a token contract and passes a regular, non-muxed address to it, things will keep working even if the token contract updates to this newly proposed feature, which is to allow transfers to have multiplexed destinations and sources. This is what has + +[22:00] been spiked, and it seems to work. In terms of complexity on the token side, there isn't much: it's a few additional lines of code to convert from multiplexed addresses back to normal addresses, which can do all the interesting things that addresses can do. That's the high-level picture; I'm not sure we need to go too deep into details right now. I guess the conclusion from the discussions so far is that we have a way of making this work in a non-disruptive fashion, meaning that existing contracts will not be broken, and it is also possible for clients to discover whether a contract supports multiplexed addresses at all, + +[23:00] which may be relevant in some contexts.
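A minimal sketch of the SDK-level wrapping idea described above. The design was still a spike at the time of this meeting, so the names and shapes here are illustrative, not the published SDK API; `Address` stands in for the SDK address type.

```rust
// Hypothetical illustration: one type wraps both regular and muxed
// addresses, so a transfer that accepts it also accepts the plain
// addresses that existing callers already pass.
#[derive(Clone)]
pub enum MuxedAddress<Address> {
    Plain(Address),
    Muxed { base: Address, id: u64 }, // base account plus multiplexing id
}

impl<Address: Clone> MuxedAddress<Address> {
    /// The "few additional lines" on the token side: recover the plain
    /// address, which can do everything addresses normally do.
    pub fn address(&self) -> Address {
        match self {
            MuxedAddress::Plain(a) => a.clone(),
            MuxedAddress::Muxed { base, .. } => base.clone(),
        }
    }

    /// The multiplexing id, if the caller supplied one (e.g. an exchange
    /// sub-account); a token would typically just surface this in events.
    pub fn id(&self) -> Option<u64> {
        match self {
            MuxedAddress::Plain(_) => None,
            MuxedAddress::Muxed { id, .. } => Some(*id),
        }
    }
}
```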
So I think this part is more or less clear part is still unclear and I wanted to talk about a bit more is how exactly are we going to represent the multiplex destinations and the events and the option of just putting them into topic is problematic. Because it would break index. If they don't do anything special this Multiplex addresses break in a sense, that they will have too many virtual destinations, that they really shouldn't be caring about. So in the link I posted yeah thanks Le for posting other discussions yeah in the discussion the da post ly has listed different + +[24:00] Approaches approaches to actually how to handle the events with mlex destinations and I think we haven't reached the full agreement on this. But again it seems like from my own preference and I see Alex has commented on this seems like what we could do is you could just converge yeah I say want to talk. But I guess the current preference is to converge all the possible memo, that we currently have support in the transaction converge everything into the single Multiplex address data structure. So, that for the classic transactions you will be able to generate Multiplex destination based on the transaction memo even of this transaction mem is non ID and for s b + +[25:00] Use cases you only support ID MERS to kind of ruce the potential amount of confusion I know Alex. If you want to okay Alex doesn't want to talk I don't know. If you want to yeah no I just you know my comment was basically my preference just as one point of view on like, which one we should use I guess I do have a question. Now thinking about it like would this cause any breakage to how people parse M addresses. If we were to expand M addresses to actually cover all types of memos well it depends on where exactly we put it in the sketch I've posted above the above in the same discussion I'm actually only extending SC address + +[26:00] Address type, which means, that classic Max account data structure will stay as is and the reason to keep it as is, that it's kind of over the place in the protocol like a lot of transactions have maxed accounts as sources or destinations and yes there is really no good reason to kind of retroactively Plum all this memos into the classic Max account type and yeah this m will just remain in address and it will be just yet another special SC address as the remainder of with [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) we introduced like chable balance address and liquidity po C address and both can only appear in the context of this unified events sources of or destinations of payments and this is basically the Third special address kind + +[27:00] That only appears in unified events coming from classic I cannot say this will definitely not cause any breakage. Because well we are kind of in order to achieve this we need to not well kind of break right the token events by converting the data field from a single integer to a map there is definitely this breakage and you know. If someone there something to address they may or may not be broken. So PR definitely some cost to it. But it's not really specific to this proposal of adding this maxed address with memo whatever new type. Because we kind of already have this issue. So yeah I hope the breaker scope is really minimal especially. 
So I hope the breakage scope is really minimal, especially if we support this in the libraries: whoever + +[28:00] is using a strkey library to convert to and from an SC address will just need to update the version, and hopefully everything just works. George, you have the stage. Yeah, I just wanted to add that I 100% agree we should minimize — or rather try not to make — any changes to the actual muxed account XDR or the M string key format, because there are deep assumptions in a lot of places downstream, both in platform products and beyond in the ecosystem, about the M string key specifically having the integer as the ID. So if we can isolate that to SC address, that sounds great. Right, that is option four, or + +[29:00] — option four, right. Or, more precisely, it's not option three; I think option three is the only one that modifies the string key. I think option two is the closest: using M-addresses wherever possible, but not trying to shoehorn text and hash memos into an M-address, as long as we're talking about the strkey representation and not some other representation. Yes, I think that's right. I understand that concern. Personally, when I look at these four options — and I think there are more than these four options, so if people have other ideas, please post them to the thread; these are just the first four that came to mind — the attractive thing about the first option and the third option is that they're very consistent with what you see going into a transaction, I think for + +[30:00] exchanges at least, or for legacy users. If somebody puts in a memo that's a string, it comes out labeled as exactly that string; it looks exactly the same. If somebody's using developer tooling and decodes a transaction, they see that the transaction had a particular memo; when they decode the event, they see the memo right there, looking exactly the same. And the same thing if the memo is an integer. And because we need to decode the M-address into a G-address for the topics anyway, I don't think it's such a stretch for option one to break it apart, and even in the muxed case for the integer to come out; at least there's some consistency there. The concern I have with option two is that it's a bit less consistent: you can create a transaction with a muxed + +[31:00] address and you get an M-address on the other side, which makes a lot of sense, but then you create a transaction with a G-address and a memo, and some memos result in an M-address while some memos don't. Maybe this is an edge case we shouldn't worry too much about, but that inconsistency seems surprising to me; I think it will be surprising to somebody. Just to clarify real quick: isn't it normally the case that when people use an ID for differentiation on omnibus or custodial accounts, they would use the ID form? Text and hash don't seem like they would necessarily apply there, so it might be an edge case where someone is attaching a piece of text that is supposed to display something; putting that into a + +[32:00] memo would not be intended to differentiate. Yeah, we should use the BigQuery dataset to validate, just to make sure we get these numbers exactly right, but when I've looked at this in the past, I have actually seen plenty of text usage. A lot of the text usage is exchanges placing numbers into text form, and
then using the memo text. I see, so kind of a misuse of the text form. Yeah, I think Jake has just commented on that; our comments about what most exchanges are doing, we just need to validate, because I know one exchange that actually does use muxed accounts — well, they support both muxed and non-muxed; for the non-muxed they use text, and for the muxed they're obviously using the ID, which is interesting. + +[33:00] I think one of the advantages of option three, which modifies the M string key format so that it can contain more information, is that it pushes everything towards that muxed address format. I don't know if that would really change adoption of it — probably not — but it does create a single unified view in the data that comes out the other end. I'd be interested, if Simon is here, in the perspective of a data consumer, a future data consumer, assuming the network will always have more users tomorrow than it does today: which of these approaches makes the most sense + +[34:00] for future users and future consumers? Hello. I would actually say, from a big data or OLAP perspective, I don't think any of these would cause a problem for scalability. I'm not sure if that's helpful or not, but unless the number of users gets into, I guess, the billions range, which I suppose is possible, this won't be an issue for the next decade or so from an OLAP perspective. + +[35:00] What about from the perspective of a data engineer trying to represent this data? You're looking at data where most rows have a single value for the destination, and then in some cases this value is actually a combination of multiple fields. Does that make it more or less difficult when integrating into other systems? Would that be a concern, a potential foot-gun, something that would be easy to make a mistake with? I think for our use case I don't really have a concern with that; the concern would be more for whoever is ingesting from RPC or creating a Horizon-like endpoint, so I don't know if I'm the best to speak on that. + +[36:00] Yeah, from Horizon's perspective: because we already support M-addresses in their current form, the format itself would break, right? You would need a way to distinguish what kind of memo it is now, so anybody relying on the current string key format would no longer be able to use it: any existing queries, lookups, or parsing routines. Anyone trying to find things in Hubble would have to reformulate their M-address, as far as I understand it. I mean, it depends on how it gets formatted in the XDR, or how the strkey definition defines it, but it seems like this would cause a lot of pain downstream if we extended it to be more than strictly integer + +[37:00] IDs. I think it can be done safely. I did a quick spike looking at what would need to change about the M string key, and it's 100% extendable without breaking existing string keys, without confusion.
Existing string keys would continue to decode to the exact same value, and for any existing decoder that follows the spec, no new string key would overlap with an existing one: every new string key that follows a different format would definitely fail to parse. When I say new, I mean the text, return, and hash muxed types. So I think we could definitely do that without breaking existing systems. But whether that's the right way to do it is another question, because + +[38:00] there will be systems that won't upgrade to the new format; they might assume there's nothing to change, and then they become broken. Yeah, I'm interested to hear — Nico, you were just saying exchanges would crack open the M-address, so case three is strictly worse as a use case; could you unpack that a little? Yeah, basically, in their ingestion system, what they are looking at is deposits to their hot wallet, and then the memo; that's how they ingest the data. I don't see why they would want to track M-addresses as the deposit key separately from the hot wallet. Yeah, that's a good point. Also, exchanges that do use + +[39:00] these identifiers define them per customer, so you're really interested in that identifier, whether it's an ID or a string. I think what I hear you saying is that exchanges don't really care about the M-address after the import, other than using it as an input so that users can just enter one value. I guess you can still argue, as someone deciding what is going to be published as an event, that you can always go back and forth between the M-address and the address plus memo, and you can abstract that away in the SDK; or an indexer actually ingesting this data can convert it and serve it however it wants — so does it really matter? I think the point about breakage, though, is very important, because you first + +[40:00] said it wouldn't cause any breakage, but anyone who is parsing current M-addresses and doesn't upgrade their implementation to the new thing would probably break if they see an address using the extended version, right? Yeah, in that case they're not going to be able to decode it, because it'll be a different format. I think there's really some confusion here, because we might be reading these proposals differently: I do not think any proposal puts string keys anywhere in the events. Yeah, that's exactly what I was about to ask: when we say "from M" and "to M" in the transfer topic, "from G" and "to G" is an SC address, yes. Basically, in my mind, G-to-G means from an SC address account (ed25519) to an SC address account, and
So this key discussion is rather like how do we + +[42:00] Like like. If we want to convert to string key or not. But you know the event structure does not change I think well I think there is an impact here. So yes string keys are not part of the protocol and they're not part of the XDR. But they are part of the developer experience and how we structure this will impact it to a degree. So you know we you I agree, that you know we can bundle these two pieces of information the address the actual address and the memo separately or together in different structures the same structures. But we do need to make a decision in the event. If it's a top like. If it's. If if this event the data section is going to become a map is one of these fields going to be something, that we expect to be a single unit or we going to expect them to be separate units. And. So and, that affects how it's going to render in things like the developer Tooling in the RPC Json API. Because + +[43:00] That's yeah like, that's where we actually do map things to string keys and they either map or they don't map well. If if we make them like separate fields in the data nothing, that we built today for developer tooling is going to be able to map those things to a string key we want them to be a string key. If we put them in a single field. Then we have, that option. But, that may or may not be a good idea and we're also talking about redundant data too. So does it you know. If the G is in the topic does it really make sense to spend another 32 bytes and actually keep, that g in the data as well, that's unclear to me right yeah I just want to point out, that like basically for the exchanges I don't think any option is specifically like bad or inefficient. Because like still have a somewhere in the event. So they you + +[44:00] Never need to like par this out of strink key as for the overhead yeah I think it's a valid concern yeah I think like number one I think niik sort of pointed out, that was an option, that was is like low risk of doing the wrong thing with like option one is. So simple you know, that everything's pulled apart you've got everything in the event the really only downside of option one where everything's broken apart is the developer experience isn't quite the same. So. When you're looking at a human readable version of an event you're going to see this memo and a g address you're not going to see, that M, that was in the original. But this already happens everywhere you know. When you go to `stellar.expert` or other explorers and you drop in an M address it immediately drops you into a page about the G address. So I don't actually think we need to have complete consistency with madress goes in madress + +[45:00] Comes out yeah I guess I just generally say I'm generally for option one I think it's simple the messages smaller yeah I think this may be fine and also like for the consistency part like at least like. If you can have M addresses in the contract invocations things are immediately much less confusion. Because you know. If you wanted to check your transaction like there is a very good chance, that block Explorer will correctly say, that you actually have performed the transfer to an M address and you know not some pair, which was my concern for the input. But for the events I agree it may be not as important and events are generally much less human reable. 
Okay, so it sounds like we're leaning towards option one; no one's opposed to that, so we can move on to the next topic. Yeah, I'm good. All right, the next thing I think we should discuss is what to do about the TX memo, and whether we should emit it as a separate event. For example, if an exchange is replaying history using classic events, and someone sent a payment to Coinbase with a TX memo and a + +[47:00] muxed account, we don't have space in the current events, as they are defined, to include both the TX memo and the muxed information. An option Dima mentioned is that we could emit a system event, similar to how [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) emits a fee event, and that event would just contain the TX memo if one existed. Does anyone have any thoughts on that? Sorry, you're saying you would not emit the destination-memo event if you have a transaction memo? No. What I'm saying is that on a payment, if you use a muxed account, you will have a destination memo in the transfer event; but if that transaction also had a TX memo set, what do you do? Yeah, but + +[48:00] first of all, I'd like to understand what happens if you only have a transaction memo and not a muxed destination. So the CAP currently says there's an order of precedence: you would pull the TX memo and put it into the event. But if you emitted the TX memo separately, it would probably make sense to ignore that order of precedence and have the consumer pull the information they want. No, I think that's a mistake, because we saw with Horizon where this model of letting clients decide leads. I think this is the opportunity to actually fully specify the precedence order, so that you don't have ambiguity — because, and I don't want to get exchanges in trouble, I imagine this precedence order is not + +[49:00] actually respected today, because it's fairly complicated. Well, let me step back for a second: is the ability for an exchange to replay from genesis using events a requirement? Because if it is, then you can't make assumptions about what they care about: certain exchanges may care about the TX memo, and some may care about the muxed information. I think this is a new event stream, so we get to do what makes sense. I think, yes, you don't want to lose the information that there was a TX memo, but at the same time I do think the transfer events should contain, as per the specification of transfer events, what we believe is the right destination memo in this case. Yeah, and on that point, + +[50:00] I'm not actually sure replay constrains us: this is a new event stream, so replay can use the new semantics, while the previous versions of the meta and the transactions still describe the old semantics. The events can be specified
however we want them to be. Maybe I wasn't clear: what I was initially saying is that the TX memo would be in this new event, and the transfer event would only have muxed account information; and I think, Nico, you're saying that in some cases we should push the TX memo into the transfer event as well. Yeah, going forward, it's really about what we want those events to look like. For classic events, I think we want them to properly represent the + +[51:00] destination, and therefore you need to see the transfer event with the proper destination plus memo — like option one, say — even if the memo was specified only at the transaction layer. Okay, so if the account wasn't muxed, then you fold the memo into the transfer event, and the result is basically indistinguishable from a muxed destination: whether you have only the transaction-level memo, or only the muxed destination account on the transaction or the operation, those three cases should all end up with the same, correct transfer event carrying the added memo information. As for whether we need a separate event for the memo itself, I don't know what the use cases for that are. + +[52:00] People can already get that by pulling the transaction itself and cracking it open, if they really want it. I agree you can do that, but didn't we want consumers to not have to do that? I guess it depends on whether that's a requirement we care about. I think the context of the CAP was transfers, and in the context of transfers this seems to be out of scope. Okay, sorry, I'm not totally tracking + +[53:00] with what we've just said. So Nico is saying that emitting an event purely for the TX memo, a separate event, is out of scope in the context of transfers? I'm specifically thinking about the replay case for exchanges, because what happens if you have a transaction with a TX memo and a muxed destination account? Without the TX memo event, you can only show one, right? But in that case, the thing is that if people have complex logic, they will have to derive it from the transaction anyway; they have to know exactly which combination of things they're looking at. That's why I'm saying the transaction memo alone is not sufficient: if you start pulling that thread, you have to look at the transaction + +[54:00] memo, then the destination. Oh, actually, no — the override can only happen on the destination, so there's only one place. But if it's the source, that's where you have the transaction source or the operation source. Yeah, the source does make this more complicated. Well, I'm actually not sure the source makes it that much more complicated. Isn't the complexity that an exchange might be reading the TX memo today while actually ignoring the muxed address, just using the G part of it? Isn't that the complex part? Yeah, well, that's the scenario this event might help solve, maybe, because I'm guessing that happens today: if I send a payment to Coinbase with a muxed account set and a TX memo, they probably just treat it as if the account
If the account + +[55:00] Wasn't mxed right no, that's why we don't know. Because it's yeah under specified okay I guess for this we'll like to me, that yeah like the we this discussion is more like I see the historical stuff as more like as a best effort. Because really like exchanges in particular are they really I mean do you really want people to be reinges you know from Genesis or something like they already ingested already reconcile all these data in their back end yeah, that's a good point okay there's also the additional ambiguity of. If someone's sending to a m address and has a memo at the transaction level you shouldn't necessarily assume, that transaction memo is a specification of a user on the destination side right it could just be + +[56:00] Some free text just for the purposes of describing the transaction yeah the point of this event was just to you know show all the information and have the consumers deal with it. But it sounds like we don't need to deal with this right. Now also we're out of time the last thing we were going to discuss we can maybe do this offline is, that we don't we no longer need [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md). But I don't know Deo. If you want to say anything about, that yeah I guess the consensus of the discussions has been, that yeah [CAP-64](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0064.md) is not needed and yeah this is just an official announcement, that we Are CL on it. Because MOS will be implemented differently yeah, that's it all right. Then we're out of time for today thank you for joining and thanks everyone for participating. + +
diff --git a/meetings/2025-03-06.mdx b/meetings/2025-03-06.mdx new file mode 100644 index 0000000000..73c2833614 --- /dev/null +++ b/meetings/2025-03-06.mdx @@ -0,0 +1,90 @@ +--- +title: "Introduction to OpenZeppelin on Stellar" +description: "An overview of the Stellar–OpenZeppelin partnership, covering Soroban contract libraries, token standards, and new developer tooling including the OpenZeppelin Contract Wizard for Stellar." +authors: [carsten-jacobsen] +tags: [developer] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This meeting introduces the multi-year partnership between Stellar and OpenZeppelin, with a focus on improving the Soroban developer experience through secure, well-audited smart contract libraries and tooling. Christian Santagata from OpenZeppelin outlines the motivation behind bringing OpenZeppelin’s approach to Stellar and how it aligns with the ecosystem’s growth. + +The discussion highlights the initial releases already available to developers, including Stellar-compatible token contracts and the OpenZeppelin Contract Wizard. The session also looks ahead at planned tooling such as relayers, monitors, and testing infrastructure designed to simplify development while maintaining strong security guarantees. + +### Key Topics + +- Overview of the Stellar × OpenZeppelin partnership and its goals +- Bringing OpenZeppelin’s security-focused contract libraries to Soroban +- Initial Stellar contract releases: + - Fungible token standard with extensions (mintable, burnable, pausable) + - Emphasis on secure-by-default patterns and internal audits +- OpenZeppelin Contract Wizard for Stellar: + - Browser-based, no-signup contract generator + - Interactive configuration of token parameters and extensions + - Easy copy/download flow for Soroban deployment +- Planned developer tooling: + - Relayers for gasless and automated transactions + - Monitors for condition-based automation and protocol safeguards + - Code inspection tools for early issue detection + - Future deployment and testing tools tailored for Stellar +- Roadmap highlights: + - Upcoming NFT (non-fungible token) support + - Upgradeable contract patterns + - Ongoing community feedback shaping future releases + +### Resources + +- [OpenZeppelin Contract Wizard for Stellar](https://wizard.openzeppelin.com/stellar) +- [OpenZeppelin Stellar Contracts Repository](https://github.com/OpenZeppelin/stellar-contracts) +- [OpenZeppelin Stellar Dev Walkthrough Video](https://www.youtube.com/watch?v=iD7ZspsZLVo) + +
+ Video Transcript + +[00:00] Hello and welcome, everyone, to this week's Stellar Developer Meeting. Today I have Christian from OpenZeppelin here, and he's going to give a quick introduction to OpenZeppelin and the collaboration we've been having for a while, plus a quick check-in on where we are with getting Stellar onto OpenZeppelin. So welcome, Christian; nice to meet you. Yeah, it's great to be here. I think I can give a bit of an introduction to OpenZeppelin and, for the people that might have missed it, to our partnership and the work we're going to do with Stellar, if that's okay. Yeah, that sounds good. Great. For those who missed it, OpenZeppelin signed a partnership with Stellar that is going to be two years long, and + +[01:00] the goal is to improve the developer experience on Stellar. Why is that? The reason is that OpenZeppelin is of course known for being a blockchain security company — we are one of the top auditing firms for security services — but we built that on top of our Solidity contracts library. To give you a reference: our Solidity contract library has processed over 20 trillion in total value transferred, now secures over 150 billion in total value locked, processes over three billion transactions, and is used by 250 million active wallets. That's huge adoption. To give a percentage: around 35% of EVM blockchain transactions are processed through OpenZeppelin contracts, and that big number speaks to + +[02:00] how simple, safe, and good it is for developers to build on top of OpenZeppelin contracts. So the goal of the partnership with Stellar is to bring that ease of building and that nice developer experience to the Stellar ecosystem. We believe a lot in the Stellar ecosystem, and we want to give Stellar developers a great experience for building applications better and more easily. So over these years we're going to develop a full Soroban contract library, bringing over many of the flagship contracts we have in the Solidity library — like ERC-20-style fungible tokens, ERC-721-style non-fungible tokens, vaults (the vault standard), and many others — all + +[03:00] tailored to the Stellar ecosystem. On top of that, we're also going to build open-source developer tooling that will help developers automate a lot of tasks. One of those tools is relayers, which allow for gasless meta-transactions, and they work together with monitors, which we're also going to bring to Stellar. Basically, you might want to set some specific conditions on your contract, and you want transactions to run automatically based on the parameters you set; you will be able to do that on Stellar very soon, and that's, I think, great. And in general we'll work on improving the developer experience based on feedback from the community. That's a bit of a general overview of what we're going to build over the next two years; it's going to be a long partnership. Yeah, it's great. I love that it's going to make it easier + +[04:00] for developers to add tokenization or add payments, and the security part of it is a very appealing option for developers. Yes, exactly.
So yeah, exactly: the goal is to bring more tools that can really help developers build in an easy and, of course, safe way. And actually, we very recently did the first release of the library: we released the fungible token standard, along with some extensions that allow a token to be mintable, burnable, pausable, and a couple of other things. It's of course just a first simple release, but it's already being used, and that's + +[05:00] amazing. As part of this release, we also added Stellar to the OpenZeppelin Wizard, which is an interactive smart contract generator that I can share right now, I guess. Yep. I actually had an opportunity to play with the Wizard a little, and I must say it makes things so much easier; there's a lot of thought put into building it. It's very intuitive, and it makes development easy and productive. Yeah, exactly. This is the Wizard interface; we have over 10,000 users on our Solidity wizard, where you can see the many standards, which you can take as a reference for how the Stellar wizard might look. We did the first release, and the great thing about the wizard is that you can + +[06:00] go to wizard.openzeppelin.com/stellar and it's completely free: you don't need to create an account or anything, you just open the application and you can get your fungible token Stellar contract in seconds. I'm not a developer — I lead marketing at OpenZeppelin, by the way; I didn't introduce myself — but even for me it's very simple to use. For instance, you just set the name of the token (I'm going to call it Christian Token), give it a symbol, and you can see the parameters changing automatically in the contract. You can give a premint amount of tokens, the initial supply credited to the token deployer address; set, say, 10,000 tokens, and you can see it appearing in the wizard already, with the decimals it needs + +[07:00] to have. Then you can also toggle some features; they do basically what you see on the screen. The first one is mintable, and it gives the token deployer the right to create more supply at a later point; if I click on it, you'll see that it automatically adds the code it needs. Another feature is burnable; with this one, the token deployer address is able to burn, that is destroy, some tokens at will. And the last one, which I think is very useful even outside of tokens, is pausable, which gives the token deployer address the right to pause the contract. The wizard is going to fill in all the parameters you need to deploy the contract here, and I think pausable is a very interesting extension, not just for tokens but for smart contracts in general.
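As a rough picture of what the wizard assembles, here is a hedged Soroban sketch. The real generated code builds on the OpenZeppelin stellar-contracts library modules, so the names and entry points here are illustrative only.

```rust
// Hypothetical shape of a wizard-generated token (not the actual
// OpenZeppelin stellar-contracts API): a base fungible token plus the
// opt-in extensions toggled in the talk above.
#![no_std]
use soroban_sdk::{contract, contractimpl, Address, Env};

#[contract]
pub struct ChristianToken;

#[contractimpl]
impl ChristianToken {
    /// Mirrors the wizard's "premint" box: record metadata and credit
    /// the initial supply to the deployer/owner.
    pub fn initialize(env: Env, owner: Address, premint: i128) {
        // ... store name/symbol/decimals, credit `premint` to `owner`
    }

    /// "Mintable": the owner may create more supply later.
    pub fn mint(env: Env, to: Address, amount: i128) {
        // ... require owner auth, then credit `amount` to `to`
    }

    /// "Burnable": destroy `amount` held by `from`.
    pub fn burn(env: Env, from: Address, amount: i128) {
        // ... require `from` auth, then debit `amount`
    }

    /// "Pausable": halt transfers, e.g. during a security incident.
    pub fn pause(env: Env) {
        // ... require owner auth, set a paused flag in storage
    }
}
```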
But on the other side we know, that developing application is a custom process and sometime you're going to need you know to have, that function, that allow you know to oppose a contract for security reason we work with many ds like compound scroll and others and is a very important extension in case of security events so, that's an important one and finally yeah you have the license by default open Zepp license are under MIT it's just a note. But anyway like you can select like your parameters and the final thing you can just copy the contract and put it in your seller CLE or yeah you can also download it and do whatever you need it but, that's a bit the overview + +[09:00] for Wizard great yeah and. When you talk all these features you just see, that it just adds the functions to the code it's really easy yeah exactly I mean it's great as I told you like I'm not a developer. But I'm able to create a contract myself. So exactly, that's great you just toggle the button and exactly is going to change parameters it's going to import what he needs to import and adding other function here at the end so, that's, that's exactly the thing and I think, that James bakini also did a video for the wizard, that both open zein and build Stellar POS on X or Twitter you can check more on, that is actually put bring the wizard into + +[10:00] action and he really is able to deploy a contract on Stellar blockchain in five minutes with the Wizard and I think, that's a great thing right what I said at the beginning the goal of partnership is to make development on Stellar easy fast and safe and Wier is a great tool, that. Now developers and Stellar can use great yeah I can see I see James link to James video and I'll recommend everyone to go watch it's really a neat video and project. So you mentioned a little bit about Tooling in the beginning what kind of tooling are you providing and what kind of what do I get. If if. If I sign up for open sein yes yeah Ian the cool thing is you let's say you don't need to sign up in terms of open the app we have the tag line somewhere open up in equal open + +[11:00] source we are committing to more and more to open source development and we mainly do, that through two ways first one is contact libraries and again like every everything we release is you know on GitHub is public and we also encourage like we encourage a lot of community feedback to improve our contract and, that's one side right you can take just open uping contract Import in the protocol, that you need and you can build on top of, that and it's already great in terms of tooling we are building on top of Defender, that's an application, that we built with a lot of development operation tooling I mentioned before a couple, that are Rel layers and monitors. But we will actually bring many more. So yeah I can give the round down, that is the first one is called Cod Spector U, that is basically used + +[12:00] to quickly analyze a code base, that you can input into the tool to basically give you back some known issues it's like a mini audit you're not getting the audit. But you're getting you know the round down of the main unknown issues, that your Cod might have so, that's already a great addition to check for you know non issue another one ex I mentioned Rel layers is very important like to the automation process of transaction and to working together with monitors, that you can set up really on any parameters or condition, that you want is great. 
So you can basically create workflows for your protocol, which is very interesting. To give more practical examples: say you have a DeFi application; you can watch for specific transactions on a liquidity pool, and if you + +[13:00] see a suspicious transaction, you might pause the market or change some parameters, something like that. Or take an application where, when users sign up, you want to send them a little bit of tokens so they can play with your app, because it requires some tokens; you can do that through relayers and monitors. So that's very powerful for building developer automations. We're also going to build some other tools, like Deploy, which is basically a tool that helps a lot with actually deploying smart contracts and making that safe and easy, especially when deploying a new protocol version, or a new protocol from scratch, where you have a lot of contracts to deploy all at the same time; these tools come in handy. And we're also going to build + +[14:00] a testing tool, which I think is the first time we're building something like this, and exactly for Stellar. We still have to define together with the community what that will actually look like, but the goal is to bring a lot more testing features for smart contracts and make sure developers can easily test their smart contracts before actually putting them into production. That's a bit of the overview, but as I said before, this is just the starting point; two years is a long time in blockchain to build a lot, and we really welcome feedback from the Stellar developer community to let us know what they think and what they need, and we'll definitely work with them to make the best developer experience possible on Stellar. Great, thank you. I don't know if you have anything else; otherwise we + +[15:00] could maybe see if there are a couple of questions. Yeah, that's it; let's see if we have some questions. Let's go through them. I can see there's a lot of excitement about the OpenZeppelin integration in the comments. I see one question about whether the partnership includes upstreaming things to Soroban itself. I do not know that, unfortunately. I don't know either; I would need to check with the team, so I'm sorry, I don't have the answer for that. Yeah, no, it's early on; like you said, it's a two-year project, so we're just scratching the surface right now, and + +[16:00] yeah, I'm super excited about this, about what can come out of this partnership and what it means for developers. I think it definitely offers a way for developers who may not have extensive experience working with tokens or payments to integrate them easily. I see some questions about NFTs; exactly, non-fungible tokens are next on the list, coming very soon. And of course, something I probably didn't stress enough: before putting every release into production, we do a thorough internal audit. OpenZeppelin is again one of the top auditing firms; we have audited really
so many of the leading blockchain protocols, like + +[17:00] Uniswap, 1inch, Compound, Morpho; for nearly any of the top protocols you can think of in blockchain, OpenZeppelin has audited many of them, so that comes with a lot of security. Yeah, NFTs are next on the list, and upgradeable contracts are also coming soon. Okay, great. Let me see; there's a question from Matias about ERC-6900. I don't know if you have any thoughts about that. I don't, to be honest. I think what we can do, as we discussed before, is have another developer meeting with the actual + +[18:00] developers from OpenZeppelin who are building the library and the tools; they can give more insight into the tech we're going to build. Yeah, I think we should do some follow-up meetings, because as everything develops, it could be interesting to get some of your engineers on board and maybe do some examples of how to use the OpenZeppelin integration. So yeah, I'm looking forward to seeing what's coming up, and I'm happy to do another session later. Yeah, exactly. I see a last question about whether the partnership includes audits for Stellar-funded projects. We are also bringing audits to Stellar, but you should check with the Stellar Foundation, + +[19:00] because it's up to the Stellar Foundation whether they run such a program. We are bringing auditing services to Stellar, if that's the question. Okay. And then there's one about a point of view on classic versus Soroban bridging. I don't have an opinion on that, to be honest. Yeah, it seems to be maybe a little bit outside the scope of what's planned for now. But honestly, there are many interesting questions, and I can tell from the marketing side I've been seeing a lot of excitement from the Stellar community, and that's great. We work with many other successful chains and protocols, and the Stellar community is one that you really notice + +[20:00] when you do stuff, so that's amazing. I'm really excited about the partnership and about having another session involving our developers to answer more technical questions; I'm sorry, but I can't go any deeper than that myself. No, and that was not the plan. This was more of an introduction to the partnership we have and a little look into what's coming up and what's already there. We will do a more technical session at a later time. But thank you so much, Christian, for joining here today; I think this is super interesting, and it's a good addition to the tool belt for developers. So yeah, thank you for joining, and thank you everyone else for joining today and for all the questions. See you next week! Thank you very much, Carsten, and thank you to everyone for all these amazing questions. See + +[21:00] you next time! + +</details>
diff --git a/meetings/2025-03-27.mdx b/meetings/2025-03-27.mdx new file mode 100644 index 0000000000..d88d63e717 --- /dev/null +++ b/meetings/2025-03-27.mdx @@ -0,0 +1,162 @@ +--- +title: "Soroban Contracts Library Demo and Protocol Updates on Fees and Events" +description: "A technical demo of OpenZeppelin’s Soroban contracts and Wizard workflow, followed by protocol updates to CAP-63 and CAP-67 covering fee refund timing, events parity, and resource/event structure changes." +authors: + - carsten-jacobsen + - dmytro-kozhevin + - siddharth-suresh +tags: [developer, CAP-63, CAP-67] +--- + +import YouTube from "@site/src/components/YouTube"; + +## OpenZeppelin Soroban Contracts Demo and Developer Q&A {#part-1} + + + +This session is a technical follow-up with OpenZeppelin, featuring a live walkthrough of the Stellar Contract Wizard and a Q&A with engineers working on the Soroban contracts library. The discussion focuses on how developers can quickly generate audited contract templates, integrate them into a Stellar CLI project, and deploy to testnet. + +The Q&A highlights what’s shipping next in the library, how extensions are being designed, and what parts of Soroban’s architecture feel notably different from EVM development, especially around authorization and upgradeability. + +### Key Topics + +- Status of the OpenZeppelin Soroban contracts library and audit cadence +- Wizard demo workflow: + - Generate a token contract (name/symbol, premint) and enable extensions (see the sketch after this section) + - Add the `stellar-contracts` dependency (pinned to an audited release) + - Build and optimize WASM, deploy via Stellar CLI, and invoke functions to verify behavior +- Contract extensions shown/discussed: + - Mintable, burnable, pausable patterns for token contracts +- NFT roadmap details: + - Planned features such as enumerable/consecutive minting, burnable, and claim/controlled minting patterns +- Library roadmap and community input: + - Future primitives/utilities and standards (including vault-style patterns) prioritized by ecosystem feedback +- Soroban vs other ecosystems: + - Protocol-level auth patterns reducing boilerplate compared with EVM + - Simpler, more uniform upgradeability approach than proxy-heavy patterns in Solidity + +### Resources + +- [OpenZeppelin Contract Wizard](https://wizard.openzeppelin.com/stellar) +- [OpenZeppelin Stellar Contracts](https://github.com/OpenZeppelin/stellar-contracts) +- [Dev Walkthrough](https://www.youtube.com/watch?v=iD7ZspsZLVo)
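Before the transcript, a minimal sketch of the mintable pattern the demo toggles on. It is written against plain soroban-sdk rather than the OpenZeppelin `stellar-contracts` crate, so the `OWNER`/`BAL` storage keys and the constructor shape are assumptions for illustration (named constructors require protocol 22+).

```rust
#![no_std]
use soroban_sdk::{contract, contractimpl, symbol_short, Address, Env};

#[contract]
pub struct AwesomeToken;

#[contractimpl]
impl AwesomeToken {
    // Record the owner and premint the initial supply to them.
    pub fn __constructor(env: Env, owner: Address, premint: i128) {
        env.storage().instance().set(&symbol_short!("OWNER"), &owner);
        env.storage()
            .persistent()
            .set(&(symbol_short!("BAL"), owner.clone()), &premint);
    }

    // Mintable extension: only the owner may create new supply.
    pub fn mint(env: Env, to: Address, amount: i128) {
        let owner: Address = env.storage().instance().get(&symbol_short!("OWNER")).unwrap();
        owner.require_auth();
        let key = (symbol_short!("BAL"), to);
        let balance: i128 = env.storage().persistent().get(&key).unwrap_or(0);
        env.storage().persistent().set(&key, &(balance + amount));
    }

    pub fn balance(env: Env, id: Address) -> i128 {
        env.storage().persistent().get(&(symbol_short!("BAL"), id)).unwrap_or(0)
    }
}
```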
+<details> +<summary>Video Transcript</summary> + +[00:00] Hello everyone, and welcome to this week's Stellar Developer Meeting, a little bit earlier than usual. The reason is that we have a really exciting episode: we have some of the folks from OpenZeppelin. We are doing this as a follow-up to the last session we did, and this time we're going to be a little bit more technical. So I would like to add the OpenZeppelin guys to our studio. Welcome! Please do a quick introduction of who you are and what you're doing, and let's get started. Yeah, I can start. Thanks for hosting us, Carsten. Really excited to be here. My name is Boyan; + +[01:00] I am an open source developer at OpenZeppelin, building the Stellar contracts library together with Ozgun. My background is mostly in EVM development and Solidity, plus some Rust development, so I'm trying to bring the best from all the experiences I've had. Great, welcome, and thank you. Hello everyone, it's Ozgun. My name may be hard to pronounce for some, so you can call me O if that's easier for you. I've been working with OpenZeppelin for about one and a half years, and my background is more in high- + +[02:00] performance computing, distributed systems, and cryptography, all of these included. I started my blockchain journey in some layer-zero and layer-one startups, if you will, then transitioned into the Polkadot ecosystem, and now I've switched to Stellar, and I'm quite happy with it. Great, thank you, and welcome to you too. We started this journey a while ago, and the objective is to make it easier for new developers to onboard to Stellar and to build great solutions. So where are we now? How far have we come in implementing Stellar in OpenZeppelin? Can you give a really quick overview of where we are and what's being worked on? Yeah, sure. It's been a couple of + +[03:00] months since we kicked it off, and we are already approaching our second audit, meaning we have accomplished our second milestone. The idea is that we get our work audited on a regular basis, so the products and tools we are delivering to the Stellar ecosystem are safe and secure. At the moment: last month, for ETHDenver, we shipped our first release, which was about fungible tokens and the pausable + +[04:00] utility. Now we are heading toward delivering the non-fungible token implementation, packed with a couple of extensions so it suits different use cases. Next week we are kicking off the second audit, which will last about a week, and then we are shipping the next release. Okay, great; that's where we are. So for those who may not have followed the OpenZeppelin initiative, or who are just curious what we're doing here, can you maybe share the screen and show an example of + +[05:00] what it looks like, maybe the wizard? Just really quickly, give an introduction to what developers are experiencing and what they can do right now. Yeah, sure. I can showcase the OpenZeppelin Wizard and how it makes it super easy for developers to get a basic smart contract, add some custom functionality if needed, and deploy it to testnet. Yeah, I would love that; I think that would be a great introduction to OpenZeppelin for new developers. Yeah, sure. So let me share my screen.
So it's `wizard.open.com` I believe it may be helpful to zoom in just one or two more bits to make it more readable okay this is this time I think good what do you think yeah it's I think we can read it. Now yeah it looks good okay. So yeah this is our wizard, which the idea of it is like to + +[07:00] make life easier. If you need to like to build up something quickly. So some with some base some basic features you can use it yeah you can see we have for different languages and U chains. So here at, that's Stellar for the moment we have only fungible. But as we fin once we finish the non fungible module and get it audited it will be added here similar to. So you can see at for Solidity there are different modules for the moment here we have only F and thanks to this UI you can configure the Bas the basic properties of your Tok for example let's + +[08:00] call it awesome token it's yeah really basic. So and here we can Define for example. If we want to do some Prem Min let's say we want to Mint 1,000 of bit of this token and here there are different Fe, that we can add on top of this basic implementation for example say we want it to be minable and you see how the UI gets updated. If you want to add a burnable functionality meaning to allow users to burn their tokens they wish. So we can click on, that and yeah this is how our contract looks like. Then let's deploy it to + +[09:00] test net. So we are going to set up a project with the Stell CLI. So yeah for those who yeah we can check at Stellar documentation how to install the CLI and use it. But yeah here we're initializing the project. So here we have our awesome token contract yeah we need to add the dependency here the recommendation is to use to pin it to a specific version + +[10:00] or tag. So as the this version 0. One is the most recently audited it's this version, that we are going to use. Then we are ready to like to delete this boiler plate, that was provided by the C and go back to the wizard UI here we are copying our contract going back to the file we are pasting it and yeah. So we have it we have everything set up. If we like. If we need to do some modifications we can we and we know of course what we are doing we can go and straight to the + +[11:00] to the codent modified. Then we are going to build this contract. So it is done. Then the recommendations are to also not only build. But also to optimize it. So there is a special yeah there is a CLI command for it. So well it's not there yeah it's. So my target + +[12:00] so here it is we gained like something like 1,000 and something bytes of our optimized version. So it is ready to be deployed. Now just for the record I have like I set it up wallet from the Stellar lab and funded it with some lumens test net lumens, and now we are going to deploy it. So it was called my awesome token right we are deploying it with the Alice key yeah here is U an address, that + +[13:00] will be the owner and the recipient let's say it will be Alice again. So Alice will re receive the tokens so. If everything's fine we should have our contract. So yeah this is fine it gets it got deployed this at this address and Alice received the 1,000 1,000 tokens, that we yeah we preed basically, and now yeah you can interact with this contract either with the CLI or build + +[14:00] a UI build a UI for it maybe just let you let demonstrate how we can invoke it. So just to. So this is the contract. 
This is great; it really convinces you it's easy. Of course you have to be a bit familiar with the Stellar CLI, but I think it's very intuitive and user friendly, so it's really easy to get up to speed with it. I think this demo showed how simple it is to work with, and one thing I appreciate a lot is the testing this goes through on your end. If you are implementing this yourself, and maybe you are + +[16:00] still pretty new to development on Stellar, just knowing that this has been tested thoroughly, that it is secure and safe, gives me a lot more confidence as a developer that the products I'm building are also safe for the users. The convenience of using OpenZeppelin is obvious here; it's super easy and simple. But for me, knowing that this has actually gone through very serious testing is something I would like to put at least a little bit of spotlight on. I think maybe we can see if anyone has any questions. I see Elliot, actually one of my team members, has a question here. He asks: in the NFT contracts, are you implementing the same extensions as the + +[17:00] fungible tokens, or a different set of extensions? I believe I can take this one. They are not completely the same, since the standards we are following diverge as well. For NFTs it would be enumerable, consecutive, mintable, burnable, and also claimable. Some of these are not exactly extensions, but we want to keep those names so that if a person from another ecosystem comes here and searches for them, they will know how they are implemented or adopted on Stellar. So these five things that I mentioned will be covered. Okay, great. We have another question, from JS maxi. They said + +[18:00] they are currently developing an ERC-4626-style vault solution on Stellar/Soroban, and ask if that could be integrated into the OpenZeppelin standard library for Soroban in the future. I actually don't know if that's something we can answer here. I think that's probably about 4626. Yeah, 4626 is included in the roadmap for the next milestones. I'm not sure if it's in milestone two or milestone three, but it is for sure included in the roadmap. Okay, but they are also asking for collaboration; they are already trying to build it. So yes. By the way, the title of our jobs is open source, so of course any collaboration is more than welcome. Please be sure to check out
Because I was not able to answer, that question you guys are the experts on, that okay let's see we have one question more from Matias what design considerations were made for the library considering classic Soroban and How Stellar classic assets work yeah I can maybe partially give an answer and O you're going to add. If I miss anything yeah. So we are trying to navigate between like different + +[20:00] let's say requirements concerns and yeah of course one of the requirements is to make things compatible with Stellar classic for the moment we have finished with the fungible token implementation, that is like in the, that was audited I demonstrated and actually it overlaps by with many of the features, that are available already from the Soroban sbone SDK itself. So on, that front we haven't done much. Because yeah most of the work was already done by the Stellar Development Foundation team. So + +[21:00] yeah for the NFT, that we are for the NFT part modu, that we are working at right. Now yeah there are some discussions about how this compatibility can be brought on the table there are some ideas, that are circulating. But yeah trying to accommodate also for existing Stellar developers, that are like try to leverage Stellar classic assets. But also for some other use cases where people need some customizations, that can be brought only by smart contracts okay great let's see yeah and I was actually thinking about this mat was he was asking on what's on the road map what + +[22:00] is coming next can you maybe talk a little bit about the road map there will be smart wallets many other token standards some utilities like fix Point arithmetic and Merle proofs stuff like, that. But they are all discussed and we may change the order Etc depending on the community feedback and our priorities okay great yeah actually yeah we are really looking for like feedback what is what are the utilities the The Primitives, that are most the mostly like Builders from Stellar are like eager to see. So yeah we are definitely looking for feedback so, that we can Shape Up together the our road + +[23:00] map okay great. So we have a question also from L again any highlights or low lights you can share about the Stellar sorond architecture compared to other blockchains since you have work with other chains you want me to start buan. And then you can continue okay. So authorization is done in a great way I will let poan speak more on, that. But but I also admire the way authorization and authentication Works in San my biggest admiration comes from the developer experience it's great right. Now it's super simple yet super safe and we also want to and try to do our best to keep this safety and simplicity in our projects I will L it to buan + +[24:00] yeah I'm also joining Al on this front and yeah just to add, that like yeah authorizations how they're handled like on a protocol level actually relieves a lot of pain points, that for example developers from coming from EVM chains are having like for dealing with authorizations actually many of the things, that are natively supported and Builders shouldn't care like at all in other ecosystems like it's additional overhead of like how to construct your contract. So they're not too big they're too complex and here like + +[25:00] Builders can focus on what's the most important the business logic, that is regarding their specific application. 
So this is a huge benefit other thing, that comes to my mind is for example how the how we deal with upgradability in soron it's just a seamless like a wine liner compared to for example Ethereum where are like dozens of different OB gradability patterns with different tradeoffs security concerns Etc and here is just a no-brainer. So it's bit a big thumbs up about the work, that's done in the background. So yeah okay great + +[26:00] well, that was all very positive. So happy to hear, that I think Matias was asking about the soulbound tokens. If if, that's possible is, that something, that's been cons being considered or thought about or is, that not something, that's been considered yet sorry it cut and I couldn't hear oh. So Matas was asking about Soul bound tokens. If that's soul to yeah actually it is not something, that is planned straight away. But but yeah I mean with the NFTs we are building the ground for, that and yeah maybe yeah we have to see how yeah how this is how + +[27:00] this can fit in the future Milestone okay great by the way it is worth to note, that we are building this for community so. If Community wants, that we will for sure do it great have you identified the biggest pain points from a developer experience for building cross Chain Solutions this also a question from yes right. Now we haven't explored how we would do cross chain stuff for Stellar. So I for myself am not able to answer, that as of yet maybe in the upcoming months I will be. But maybe buan will have something to say I don't know yeah just to add, that there is the AEL team, that are doing some yeah great job on, that front so. If you are interested in Cross chain cross chain + +[28:00] standards don't remember the exact number of it. But U yeah you can check their work yeah they have really good stuff there yeah okay great any other questions let's see I think we got H there's only one I skipped was what is the expected Cadence of releases for the library yeah I answered, that right. Now in chat. But let me say it from here as well we are we will try to have a monthly release schedule the keyword here is trying. But we will do our best okay great let's see any other questions I think we got quite a few questions, that were outstanding from our + +[29:00] last session where we were not really provide able to provide good answers I think we got all of, that covered. So it was really great to have you both on this call and I think I'm super excited about what this means for the developer experience on Stellar and I can't wait to see what people are building using the open sein Library any final words from your guys yeah just yeah thanks a lot for having us it was a pleasure for us also to thanks for all the questions and yeah don't hesitate to Ping us on Discord and yeah chat there about whatever you think yeah it's important so, that we + +[30:00] build a great tools for the whole web system. So yeah thank you and in addition to, that please feel free to open up any issues for the features you want to say or bugs you want to report or just questions you want to ask we are also pretty active on GitHub and it's a public repository. So feel free to interact with us from GitHub as well great thank you so much for joining and maybe we will do a follow-up session at a later time this was great love the demo love to pick your brains on these topics. So thank you again for joining and thank you for everyone who listened in here thank you and see you next week bye by right thank you + +
+ +## Protocol Updates on Fees, Events, and Resource Limits {#part-2} + + + +This short protocol follow-up reviews changes to CAP-67 and CAP-63, focusing on event parity improvements, restrictions to reduce ambiguity, and a refinement to how Soroban fee refunds are applied to support parallel transaction processing. + +The group also discusses whether events should represent intra-ledger balance transitions in a fully ordered way, and leans toward keeping events simpler (with more detailed reconstruction left to transaction metadata when needed). + +### Key Topics + +- CAP-67 updates: + - Emit `set_authorized` events for parity with the Stellar asset contract + - Inflation operation emits a `mint` event per payout + - Prohibit muxed source accounts and memos for Soroban transactions to avoid confusion and because they are not signed in auth payloads + - Consolidate diagnostics into a single transaction-level vector (remove operation-specific diagnostics vector) + - Add muxed account support discussed previously + - No separate fee-refund event; refunds are reflected within the fee event +- CAP-63 update on refund application timing: + - Apply fee refunds after all Soroban transactions are applied, rather than after each transaction (see the sketch after this section) + - Motivation: fee source is not in the footprint, so per-tx refunds can conflict with parallel application when the same account is modified elsewhere + - Alternative considered: add fee source as an implicit read-write footprint member +- Events vs precise balance modeling: + - Concern raised that combining fee and refund in one event can hide transient balance dips within a ledger + - Consensus direction: keep events focused on end-of-ledger balance tracking; use transaction metadata for deeper audit-grade ordering details + +### Resources + +- [CAP-0063](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0063.md) +- [CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md)
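Before the transcript, a conceptual sketch of the CAP-63 ordering change: fees are charged up front, transactions are applied (possibly in parallel), and only then are refunds credited. This is illustrative pseudologic with invented types, not stellar-core code.

```rust
use std::collections::HashMap;

type AccountId = String;

struct Tx {
    fee_source: AccountId,
    charged_fee: i64,
    unused_fee: i64, // in reality determined while applying the tx
}

// Refunds are deferred to a third phase so that applying transactions in
// parallel can never race with a credit to a fee-source account, which is
// not part of any transaction footprint.
fn close_ledger(txs: &[Tx], balances: &mut HashMap<AccountId, i64>) {
    // Phase 1: charge fees up front.
    for tx in txs {
        *balances.entry(tx.fee_source.clone()).or_insert(0) -= tx.charged_fee;
    }
    // Phase 2: apply transactions; collect refunds but do not credit them yet.
    let refunds: Vec<(AccountId, i64)> = txs
        .iter()
        .map(|tx| (tx.fee_source.clone(), tx.unused_fee))
        .collect();
    // Phase 3: credit every refund only after all transactions are applied.
    for (acct, amount) in refunds {
        *balances.entry(acct).or_insert(0) += amount;
    }
}
```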
+<details> +<summary>Video Transcript</summary> + +[00:00] All right, I'll get started. I welcome everyone to today's protocol meeting, where we'll be discussing some recent changes to two Core Advancement Proposals, [CAP-63](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0063.md) and [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md). First I'll start with several CAP-67 updates. The first one is that we emit the `set_authorized` event to give the unified events parity with the Stellar asset contract. There's also a change specifying that the inflation operation will emit a `mint` event for each payout. There's also a change to prohibit muxed source accounts and memos for Soroban transactions: we're prohibiting the muxed source to avoid any confusion in events, and that, along with memos, is not signed in the auth payload, so we're just prohibiting them altogether. There's also a change to have all diagnostics live in a single transaction-level + +[01:00] vector and remove the operation-specific diagnostics vector that CAP-67 originally specified. And we also added the muxed account support that we discussed at the last protocol meeting. All these changes can be seen at the CAP-67 link that I shared below. The last one is not really a change; it's more of a detail that we will discuss in this meeting, which is that there's no specific event for fee refunds: the refund is folded into the fee event. This is related to the CAP-63 change that I'll discuss in a moment. But before I move on, does anyone have any questions? All right, then I'll move on to the CAP-63 change. One sec: can everyone hear + +[02:00] me? Okay. Yeah, then I'll move on to the CAP-63 change, which is a change to apply the fee refunds after all Soroban transactions have been applied, instead of after each transaction like it is today. This was done because the fee source is not part of the footprint, and this is an issue because it's possible for that same account to be used in a Soroban transaction. So if you apply transactions in parallel, you would need to make sure that the refund to the fee source doesn't conflict with a different transaction that modifies the native XLM balance of that account. We had two options here: either make the fee source an implicit member of the read-write footprint, or change how refunds are applied and apply them after all Soroban transactions have been applied. We decided that applying the refunds later made more + +[03:00] sense, and that's what CAP-63 went with.
And the relevant CAP-67 topic to discuss here is that the refund is folded into the fee event. This is fine if the goal is just to track balances at the end of every ledger, but if a user wants to infer anything related to balances within a ledger or within a transaction, then this can be an issue. Imagine, as in an example Leigh mentioned, that you have a contract that takes an action based on an account falling below a certain balance because the fee was charged to that account. If we emit the fee as the fee plus the refund, then when you look at the events later on, for example when doing an audit, you will never see that the account dropped below the balance + +[04:00] that the contract triggered on. So this is an edge case, and something we can discuss is whether this use case actually matters. The alternative would be to emit a separate refund event, but that actually opens up another can of worms, which is that fees are charged before the operations are applied, so you would need to convey information about the ordering in which balances change within the events. Currently the events are just a transaction-level vector and an operation-level vector, and we would need changes mirroring the structure that's in the transaction meta, where you have everything that happens during transaction application and everything that happens post-application. We would need to convey that same information in the events, and if we do need this + +[05:00] granularity, that might be the path to take. So yeah, does anyone have any questions about this or have input? What I wanted to say is that we don't have many people today; we also don't have Leigh, and we don't have Nico and others. So you could certainly listen to what the folks who are present think, but yeah. Yeah, exactly. I'm inclined myself to just not do anything and simply clarify in the CAP that events should not be used for modeling the exact balances during ledger application time, because fees are in general charged at different points in time. + +[06:00] Okay, here's Nicolas. Yeah, it seems to me like that's the thing to do here, and if someone has objections, they can discuss them async. Because otherwise we would introduce a lot of structural complexity in the meta for events, and we wanted events to be rather simple to consume, so that hopefully the consumers of events don't have to worry about reordering operations. But yeah, that's my take on this. Yeah, I agree. The use cases we've discussed in relation to CAP-67 tend to be for just tracking balances, + +[07:00] and updating your balance at the end of every ledger is what this was designed for. If you want to do anything related to audits or anything more granular, I feel like the transaction meta should be enough, and making this system more complicated to handle those use cases doesn't seem to be the right move here. So I agree with you. Yeah, exactly. And I guess the bottom line here is that even though the effects are applied
at different points while the ledger is being applied, you still get the correct value per transaction; it's just applied at a different point in time. So when you're looking at transaction events, you'll still get the correct fee charge that corresponds to what happened in reality; you just don't know the exact details of when exactly it was charged. So hopefully that works for most intents and purposes. So yeah, I + +[08:00] guess we can write this up, update the CAP, and respond in that thread. Okay. Does anyone else have any input? We have a small group today, so unless anyone has any other questions, we can probably call it early. All right. Well, it was a quick one today. Thank you for joining, and yeah, thanks, Nico, for your input. All right. See you. + +</details>
diff --git a/meetings/2025-04-03.mdx b/meetings/2025-04-03.mdx new file mode 100644 index 0000000000..bbc66c6b75 --- /dev/null +++ b/meetings/2025-04-03.mdx @@ -0,0 +1,88 @@ +--- +title: "New features in Stellar Lab & Upgradeable Contracts SEP" +description: "A walkthrough of recent Stellar Lab upgrades for contract inspection, invocation, and TTL management, plus a community feedback session on the Upgradeable Contracts SEP and its reference implementation." +authors: [carsten-jacobsen] +tags: [developer, SEP-41] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This meeting showcases major improvements in Stellar Lab aimed at making Soroban development and contract operations easier, including contract exploration, storage inspection, and a more guided flow for contract invocation. The presenter demos how developers can go from generating and deploying a token contract to interacting with it and managing contract storage directly from the Lab UI. + +The second half introduces the Upgradeable Contracts SEP, contributed alongside OpenZeppelin work, and asks the community to review both the proposal and the accompanying contract utilities. The discussion frames upgradeability as a common need for evolving apps while noting the security and compatibility considerations required to do upgrades safely. + +### Key Topics + +- Stellar Lab updates for Soroban developers: + - Contract explorer with source verification flows and contract metadata + - Viewing contract spec (ABI-like types/methods) and downloading WASM + - Contract storage inspection and export (CSV/XDR) for operational workflows + - Smart contract listing improvements and ongoing performance work +- Guided “invoke contract” UX in Lab: + - Pull methods automatically from contract spec + - Prepare/sign/submit transactions from the UI with XDR visibility + - Display of resource fee estimates via RPC simulation +- Contract state lifecycle operations: + - Extend TTL and restore footprint workflows from Lab (see the sketch after this section) + - Using exported storage keys to target specific entries for extension/restoration +- Upgradeable Contracts SEP overview: + - Standard approach for contracts to upgrade their own WASM safely + - Key caveats: constructors not re-run on upgrade, avoid “upgrading away” upgradeability, and maintain storage compatibility + - Proposed metadata field for tracking contract versioning via semantic versioning (`semver`) +- OpenZeppelin utilities and future tooling: + - Reference macros/helpers for adding upgrade patterns to contracts + - Planned integration into the OpenZeppelin Wizard for toggleable upgradeability + +### Resources + +- [Stellar Lab](https://lab.stellar.org) +- [Upgradeable Contracts SEP](https://github.com/stellar/stellar-protocol/pull/1671)
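Before the transcript, a minimal sketch of the storage-TTL operation the Lab demo drives from the UI, expressed as an in-contract call using soroban-sdk's `extend_ttl`. The balance key shape and the ledger numbers are assumptions.

```rust
#![no_std]
use soroban_sdk::{contract, contractimpl, symbol_short, Address, Env};

#[contract]
pub struct TtlExample;

#[contractimpl]
impl TtlExample {
    // Bump the TTL of a persistent balance entry from inside a contract,
    // the same operation the Lab's "extend TTL" flow submits externally.
    pub fn bump_balance_ttl(env: Env, holder: Address) {
        let key = (symbol_short!("BAL"), holder);
        // extend_ttl(key, threshold, extend_to): if the entry's remaining TTL
        // is at or below `threshold` ledgers, extend it to `extend_to`.
        env.storage().persistent().extend_ttl(&key, 1_000, 100_000);
    }
}
```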
+<details> +<summary>Video Transcript</summary> + +[00:00] Hello everyone, and welcome to this week's Stellar Developer Meeting. Today we have two things on the program. First we have Jeesun, who will present the latest updates to Stellar Lab, which I'm super excited about; it's a tool I use almost every day. And then later on, Jane, also from the DevX team, will join and talk about the upgradeable contracts proposal. So let's get started. I think it was in early December that we had the last presentation of Stellar Lab, but a lot has happened since then, so I'm super excited to learn more about the new features. Take it away! Yeah, awesome. Thank you, Carsten. I'm so excited to see everyone in the chat. Hello, my name is Jeesun. I'm on the DevX team; we work on the CLI, the Lab, Quickstart, + +[01:00] and more, but I'm primarily working on the Lab, and I'm very excited to show you what we have built so far for smart contracts on Stellar. So I will share my screen, and I'm going to share my entire screen, because I want to do some CLI work as well. Can everybody see it? Okay, great. Carsten, did you share my Notion doc? I am going to do that right now. So I'm going to walk you through what we're going to talk about today. First of all, we're going to go through an overview of the smart contract features on Stellar, starting with the contract explorer. We're going to take a look at the Blend pool contract and the KALE contract by Tyler and go through each feature that we built. The second part of the presentation will be a live demo, in which I might break something; I actually did break something back in December, so who knows, it might happen + +[02:00] again. I hope not; I practiced. But I feel like in the world of AI, actually breaking things is in, I think. Okay, let's go to our public Lab. I should have removed this bar, but okay, whatever. To use the smart contract features, we have to make sure that our RPC URL is set, and I do have one, provided by Overcat: `mainnet.sorobanrpc.com`. Thank you, Overcat, for running that. So this is the new feature that we built; one of our team members built it, so a big shout-out to her. I'm going to copy this contract address and bring it here, and this is going to give me all the information about the Blend pool contract. We are getting the API for this from
Contract spec is kind of like this is like our way of our version of Ethereum's AI. It shows you the data types, that this contract has. And later demo I'm going to show you what we can do with this contract spec to build some of the functionalities on the UI side and binding is coming up as well. So let's look at Kale. So Kale's going to be a little different. I + +[05:00] Don't think it has verified yet. But it will show us how big their entry or their contract storage is. Is like almost like 3 million I think. So it might take some time to load. But I just wanted to show you guys. It shows us all the version history and I'm not going to Okay, I'll click contract storage. It has three million entries, which is wild. But yeah, it shows all the information, that we the users want to know about the contract on the lab, which is great. Yeah, failed to fetch. We'll show, that later. It worked earlier. It might be too big. Yeah. So, like here has all the functions and data types. We can also do XDR. If you want. You can copy. You can also download the WASM. So, let's say you're building a contract to also want to interact with other contracts. You can download the WASOM and deploy within your contract. And we + +[06:00] Also have smart contract list, which shows us all the smart contracts, that are built. So at the moment everything is being rendered by front end. So what we are going to do to make this load much faster we are going to work on implementing back end so, that it will be more efficient and faster. But yeah, give it some time or I can also refresh. Should I refresh? Let me refresh. Okay. Yes. So yeah, all the smart contracts are happening. I can just click on it. And then we can just load it right away and check out what's going on. So yeah, so, that was the overview of smart contract + +[07:00] Features on Stellar. So what I'm going to do next is kind of go through the live demo or like building the contract and see how a developer can really utilize the lab to make their workflow much better. So I'm going to build a contract on testnet net. Open zapland recently launched SEP 41 token standards here. So look at all this beautiful token. And I'm going to use James's tutorial to build like sample contract really fast. So James is actually using OpenZeppelin's mintable standard to get the contract working. So I already did let me duplicate it. So I already built cloned his repo and did the first five steps. So all I have to do is start + +[08:00] Contract contract deploy. So let's go to my let me just go like this. So Stellar keys JSON Stellar keys Stellar key ls there's address JSON I need to do my address okay. So I'm going to do is the source who is responsible for all the transaction and anything related to this contract is going to be me JSON and just to make this work easy. I'm going to make the owner of this token contract my address and I'm gonna do I'm gonna limit my token at 10,000 and let's deploy this. I hope you work. Yes, it better work signing + +[09:00] transaction. Okay, cool. So, that one is deployed. So this is our contract and I think what I want to show you is our upcoming feature, which is invoke contract. So the way we invoke contract on the CLI is like this, which I kind of find it a little difficult or it's not very intuitive. I love our CLI. So don't get me wrong. And it would be sources JSON. And let me actually show you another feature of the lab, which is a creating key pairs, which is super useful. 
Because I'm going to account generate account and I'm going to fund this account with fbot. And you know. When you like develop + +[10:00] On any contracts it can be really confusing with like all these public keys. So I'm going to save this key pair and it's going to be testnet open zepp invoke account and going to save key pairs and I'm going to copy this go to my CLI and I'm going to mint th00and tokens to this account. Amazing. Yep. Successful. Great. So. If I go to this contract on StellarExpert, we'll see two data entries. Yep, GB. I just did, that. What I'm going to show you next is our upcoming feature, which is doing invoke contract on the lab. So, I've been working on this. It's been taking quite some time. But I am going to walk you + +[11:00] Through. First of all, I need my keys address Jason, which is this. I need this. I'm on testnet. Patch this. I think I will need my private to sign later. No. Was it styler case show? Okay. I need this later to sign it. So what I'm going to do is I'm going to invoke not payment invoke contract and our contract was this one. Go to this and I am able to fetch all these methods from contract spec, that I showed you earlier. So I'm going to do mint and let me reuse the account, that I did here. I'm going to mint some more on, that account. I'm going to do prepare transaction, which gives me this XDR. And. If you go to XDR view, it's going to + +[12:00] Tell me, that contract address is this. I'm calling function mint and this is the destination address and all the informations about what this TX is about to do. So good. I like this. It looks great. It also tells you the resource fee. All right. I'm just going to go ahead going to sign it and I'm going to sign it with this. Don't ever show your secret key. I'm just doing this. Because it's a demo and it's test net. So, I'm going to sign my transaction sign. I'm going to submit and transaction submitter. Submitter. And same thing, that we saw earlier. Looks good. Let's hope this works. And yes, it worked. Okay. So we have all these envelope XDR fee. I can go to view installer expert and it showed me, that this transaction did indeed work. And. If I go to this + +[13:00] Contract, it shows, that mint did happen and this all happened from the Stellar lab. Lab. And. So yes, that worked. One last thing I want to show you is our, popular stay wait. So we have a way to restore and extend TTL for contract storage and I'm going to show you how to do, that. So let's go back to our contract, which was this. And what you can do is, that you can go to lab smart contract. This feature is live. I'm going to load this contract and in this contract storage we have this balance and let's say this balance is like expired. In, that case I can download this go to the my CSV file and it will show me, that there is a key instance is a contract entire contract instance and + +[14:00] I need this is the contract storage key, that I want to extend or restore. Then I just copy this and let's check view XDR and see what, that contract storage key is about. If it's smart contract value and it tells me, that this is the balance and address this and let's say I want to extend footprint for, that one and I think I have to go to my account and going to get this and we can do extend this is live and contract get this and get the SC valve right here. I'm just going to do testing 200 sequence ledger and resource + +[15:00] Fee and I have this button called fetch minimum resource fee for from RPC. 
This does all the simulating and everything for you. Then you go sign the transaction (I think the secret key is this one), and we can submit the transaction. And yep, we were able to extend our TTL for the contract storage: the entry's TTL was extended to sequence 200. So that's the demo that I have. You probably won't ever need extend, because that should be taken care of automatically, and kind of the same thing with restore footprint once Protocol 23 comes. But the Lab can support it, so if you ever need it, just come to the Lab. And that's the demo for today. Thank you. + +[16:00] Great, thank you. It was super interesting to see all these new additions. And now I would like to invite Jane to the stage, who will give an update on one of the SEPs. All righty, thanks for having me. Today I'd like to talk about the upgradeable contracts SEP, a SEP that defines how smart contracts can upgrade themselves, and it is contributed by OpenZeppelin. They've been doing a lot of great work for us, shipping the fungible token and now also NFTs and upgradeability, and this is really a call for the community to look at this SEP, look at the smart contract code they're shipping for us, and add your comments. As you know, contracts on Stellar are mutable by default, and mutability in the Stellar ecosystem means a contract can modify its WASM bytecode, thereby altering its + +[17:00] interface, execution logic, and metadata. There are also cases where you want a contract to be immutable, because immutability guarantees trustless execution of the contract logic. If you think about Blend, that is an immutable contract. But in the more common case, say DeFi protocols, or if you're building a game, you might want to add more features and functionality to what you're building over time, so you would probably want contract upgradeability. In order to do that, we worked with OpenZeppelin and published the SEP. Carsten, could you share the SEP, please? Yeah, I just shared it. Thank you. So it creates a standard that defines how smart contracts should upgrade their WASM bytecode, and it also talks about some of the caveats you + +[18:00] should think about while you're upgrading. For example, the new contract shouldn't rely on a constructor, because that constructor is never invoked on upgrade. If your existing contract is upgradeable, you should be careful not to upgrade to a contract that no longer has upgradeability, because then you're stuck there; sorry, users. And you should check for storage consistency, making sure that the new contract does not introduce any storage mismatch. One more thing we describe in the SEP is adding metadata that describes the current version: essentially a new key-value pair whose value follows the semver standard (major.minor.patch), which would describe to the world your contract and its upgrade path over time. And there is simple example code in the SEP, which you can + +[19:00] see if you go down to that section: it basically defines a public upgrade function, gets the owner's address, requires auth on it, and then calls the update-current-contract-wasm host function with the new WASM hash. So that is basically the SEP.
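A sketch of that upgrade entry point: `update_current_contract_wasm` is the actual soroban-sdk host call, while the `OWNER` instance key is an assumption, and the SEP's reference code may differ in detail.

```rust
#![no_std]
use soroban_sdk::{contract, contractimpl, symbol_short, Address, BytesN, Env};

#[contract]
pub struct Upgradeable;

#[contractimpl]
impl Upgradeable {
    // Authorize the owner, then swap this contract's WASM in place.
    // State is preserved across the upgrade; only the code changes.
    pub fn upgrade(env: Env, new_wasm_hash: BytesN<32>) {
        let owner: Address = env.storage().instance().get(&symbol_short!("OWNER")).unwrap();
        owner.require_auth();
        env.deployer().update_current_contract_wasm(new_wasm_hash);
    }
}
```

Note how this matches the SEP's caveats: the new code's constructor is never run, and if the new WASM lacks an `upgrade` function, the contract can never be upgraded again.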
Then, if we go to the OpenZeppelin source code at github.com/OpenZeppelin/stellar-contracts, under the contract utils you can see that there are upgradeable macros, and these make it so that you can easily add the macros to your contract. Eventually this is going to be integrated into the OpenZeppelin Wizard, and you'll be able to decide if you want your contract to be upgradeable and + +[20:00] use these macros. So this SEP is basically out there, and we'd love to get your feedback. Please comment and provide us feedback, and hopefully we'll get these audited and merged very soon. Great, thank you for the introduction. And yeah, everyone, please go to the pull request, and if you have any comments, questions, or concerns, please write a comment, and we'll address it. I think that was it for today. This was really interesting: both getting an update on Stellar Lab and getting a view into one of the SEPs that are coming up. I really encourage everyone to take a look at it and provide any feedback you have. That's all we had for today. Thank you everyone for joining. We'll be back again next Thursday. Thank you. Take care. Bye. + +</details>
diff --git a/meetings/2025-04-10.mdx b/meetings/2025-04-10.mdx new file mode 100644 index 0000000000..677762cdad --- /dev/null +++ b/meetings/2025-04-10.mdx @@ -0,0 +1,122 @@ +--- +title: "Tupui Developer Spotlight" +description: "A developer spotlight on Tupui’s Soroban-based project Tansu, a DAO-driven system focused on securing the software supply chain, anchoring GitHub project state on-chain, and enabling transparent governance and funding workflows." +authors: [carsten-jacobsen, pamphile-roy] +tags: [spotlight] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This spotlight features Pamphile (better known as Tupui), a senior software engineer at Bitpanda and long-time open-source maintainer. He previously worked on the team behind Flight Simulator 2020, and he created a consulting company to work on open-source software. + +Tupui discusses his path into Stellar and Soroban and the motivations behind his project, Tansu. He shares how Stellar’s documentation and community support helped him ramp up from a Python background into Rust and smart contract development. + +Tansu is a decentralized system that works alongside GitHub to secure the software supply chain and break down siloed communities by using a DAO. The conversation covers how Tansu anchors key project state on-chain, structures project governance, and aims to make decision-making and funding flows more transparent for open-source communities. + +### Key Topics + +- Developer background and onboarding: + - Transition from Python/open-source maintenance into Soroban and Rust + - Learning via Stellar docs, Discord support, and hands-on prototype projects +- Motivation for Tansu: + - Strengthening software supply chain security beyond traditional centralized tooling + - Making governance and proposal workflows more transparent and auditable + - Creating fairer mechanisms for distributing funding in open-source ecosystems +- Tansu architecture and principles: + - Designed to avoid a centralized backend; logic split between client-side and smart contracts + - Uses Soroban Domains as a project registration mechanism and anti-abuse/collateral layer + - Stores minimal project metadata on-chain while keeping richer proposal content off-chain +- Security features: + - Anchoring GitHub commit hashes on-chain to detect repository tampering or history rewrites (see the sketch after this section) + - Comparing on-chain configuration with project metadata files to verify maintainers and settings +- DAO and governance workflow: + - Project-specific DAOs with proposals, voting, and visible outcomes + - Proposal content referenced via IPFS, while votes and status live on-chain + - Support for executable outcomes using pasted XDR to trigger on-chain actions (e.g., governance changes, maintainer updates, fund distribution) +- Roadmap and experiments: + - Anonymous voting work-in-progress (exploring privacy-preserving approaches) + - Planned vulnerability reporting and release artifact tracking + - Interest in trust-based governance (badges/reputation) rather than purely token-weighted voting + +### Resources + +- [Tansu](https://tansu.dev)
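Before the transcript, a rough sketch of the commit-anchoring idea mentioned in the key topics: an entry point that records a project's latest commit hash on-chain so off-chain tooling can detect rewritten history. This is hypothetical illustration code, not Tansu's actual contract; the key shapes and names are invented.

```rust
#![no_std]
use soroban_sdk::{contract, contractimpl, Address, Bytes, BytesN, Env};

#[contract]
pub struct CommitAnchor;

#[contractimpl]
impl CommitAnchor {
    // Record the latest commit hash (a 20-byte git SHA-1) for a project.
    pub fn commit(env: Env, maintainer: Address, project_key: Bytes, hash: BytesN<20>) {
        maintainer.require_auth();
        // A real implementation would also check `maintainer` against the
        // project's registered maintainer list; elided here.
        env.storage().persistent().set(&project_key, &hash);
    }

    // Off-chain tooling compares this against GitHub's current history.
    pub fn get_commit(env: Env, project_key: Bytes) -> Option<BytesN<20>> {
        env.storage().persistent().get(&project_key)
    }
}
```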
+<details> +<summary>Video Transcript</summary> + +[00:00] Hello everyone, and welcome to this week's Stellar Developer Meeting. Today I have a guest from the community: we are doing another one of the developer spotlights, and today it's a pleasure to welcome Pamphile, also known as Tupui. So welcome, and thank you for joining us today. Thanks a lot for the invite; really crazy to be here. So, I know you've been super active in the community for a very long time. I see you contribute to our documentation and to some of our repos, you help out with the Stellar Community Fund, and you help out on Discord; I see your name all over the place, so I'm super excited to talk to you today. Before we dive into what you're working on and your own projects: how did you get into blockchain, and how did you get into Stellar + +[01:00] in the way you are? Because you're very involved. Yeah. So I've been actively involved with Stellar for the last two years, I would say, and I really started to dive in when I switched companies. I went to work for Bitpanda, a crypto broker in Europe, one of the largest, and I told myself, okay, if I'm going to be a developer there, maybe I need to dive deeper into a real blockchain project. Because I was already a maintainer of many open source projects in the scientific Python ecosystem, I told myself, okay, now I need to do something, and I already knew Stellar; it was really in the back of my head. I remember (I'm not sure if it's still on the `stellar.org` web page) there was this story about somebody in Peru who had some sheep and wanted to tokenize the sheep and exchange them for something else. This was still in my mind, and I just found that concept fascinating. And so I was like, okay, + +[02:00] that sounds like real-world utility and something that can have a good social impact. I really wanted to get into that space, and that's why I picked Stellar as my platform of choice. And yeah, at that time it was just before Soroban went to beta, and I wanted to learn Rust; Soroban is in Rust, so I was like, that's a perfect match. That's how I really started to be active in the community. Okay, that's great. So you said you didn't know Rust before you started working on it. How has the onboarding process to Soroban been for you? What has the experience been like? Yeah. So my previous experience was purely Python, because I'm a Python developer in my day-to-day job. I would + +[03:00] say I'm a good Python backend developer, with zero experience in front end, so no JavaScript and all of that. And the documentation was just amazing. Without any prior knowledge of Rust or anything, I had everything that I wanted, and I also had zero experience in blockchain development. I knew what a smart contract was and the concept of it, but I did not know any of the subtleties about TTLs and lifetimes and all of that, how you deploy a contract, and all the nitty-gritty details. I had no clue about all that complexity, and the documentation was just a breeze to read.
When I had questions that weren't answered there, the community was just amazing: on Discord, and also on Twitter, people are super helpful. So it was easy, I would say. Great. So when you started developing on Stellar, did you have a purpose, a problem? Was

+[04:00] there a project you wanted to build, or did your current project come along later? How did that happen? Yeah, at first I did not have a concrete idea; I had some problems in my head that I wanted to work on. Maybe some people in the audience saw some of these. I had a Raspberry Pi, and I wanted to build a gamified platform where you have a pump, and the more you pump, once you pass a threshold, it triggers and sends some tokens. So I had something like that, that I hacked away on. That was my first contract, actually, and I even made a YouTube video about it. So that was one idea, and the second one was that I wanted to explore the tokenization of

+[05:00] things and make a token, but push it a bit further. There I had the idea of looking at live data, weather data. I was looking at the expansion and contraction of the ice in the Arctic, and I thought, okay, I'm going to use that data to either mint or burn a token. So I had sort of an oracle in mind, and I built another project like that. Building these two projects gave me some confidence in building smart contracts and interacting with the community, and everything worked, basically. Great. I think it's a really good way to learn: build something fun, something maybe not super serious, but get some experience building something you enjoy, where you can get some user interaction and see people enjoy it. So I think that's a great way to start before

+[06:00] building something more, let's say, serious. So can you maybe talk a little bit about the project you are working on? I know you have a presentation as well; we can go into it a little bit. I think it's super interesting to see what the community is building. So I guess I need to pull up the deck. Oh, perfect. Okay. Yes. So the project I'm building is called Tansu. The background is that, as I said earlier, I'm a maintainer of some well-known open source projects in scientific Python. One of them is called SciPy, and SciPy is downloaded close to 100 million times per month. So it's really huge. It's really one of the powerhouses behind

+[07:00] the scientific Python tools: packages like scikit-learn, if you do some AI, or PyTorch, leverage these tools behind the scenes. I've been a maintainer of this tool for five years or so, and I'm deeply involved in the open source world, so I know the challenges that maintainers are facing, and I also know the struggles that can come with using the software. I wanted to try to solve some of the problems I've seen by building a tool that interacts with GitHub.
I'm not envisioning replacing GitHub at all, because I think GitHub is nice for storing the code and everything, but I think we're missing out on two key areas. The first is security. GitHub is now building tools around security and around securing the supply chain, because when you're

+[08:00] building something on GitHub, at the end of the day what you want to ship is a software artifact, and GitHub now has tools to secure some of that and to ensure provenance, to ensure that the code was not altered, and things like that. But I think there's still a long way to go to fully secure that. The other aspect, which is what I really wanted to cover with my tool when I started building it, is the community aspect. I say that because, as a maintainer of a big tool like that, the typical governance workflow is that you have a mailing list, you post your idea on the mailing list, and sometimes it sits there for years, and maybe somebody replies saying you have a good idea, or not. Then there's a bunch of discussion; it's never fair, it's never really transparent, there are a lot of background discussions, a lot of lobbying. I wanted to have something that allows the community to

+[09:00] be a bit more transparent, with ways to really push ideas and have all the ideas evaluated in a fair, transparent manner. So that's the ideas side. Another point that people may know about free open source software is the free aspect. It's a double-edged sword, in the sense that a lot of people depend on this software, but at the end of the day we have a limited amount of time. We all have jobs, and it would help if you got compensated sometimes for the effort you put into this software. Some big projects like SciPy do get funding, but it's very modest, I have to say. Right now, in SciPy's bank account there's maybe around $100,000, and that's for software which is, as I said, used by millions of users around the world and heavily depended upon,

+[10:00] with 30 active maintainers on the project. So it's huge, and something like 100k is not going to go far; you could barely hire one or two developers full-time if you wanted to, and that's the budget we managed to gather after 10 years of fundraising or so. So budgets are really limited, and the way the money is collected and distributed is not very transparent either. With Tansu, I want to have tools to distribute this money a bit more fairly, because right now, when you get a grant, for instance, maybe you split it between two or three principal investigators. But what about all the people reviewing your code and contributing to all the discussion? A lot of people are left aside, and I want to have tools for that. So Tansu is a platform on top of GitHub whose goal is to help put

+[11:00] decentralization back into Git, because GitHub is this one giant platform, which is very centralized.
You push your code onto it, but then, if tomorrow you're not happy with the state of the world or something, you can decide to change your code there, and this can have drastic consequences for all the people that depend on you. On GitHub, you can just force-push to the main branch; there is not much history, and if you don't monitor it, you won't know that anything happened, and you can really break things for people. The same goes for the community: you can just not listen to what people are saying, you can push unilateral decisions, and things like that. Nothing is really fair. So I want Tansu to really give the community a voice, and also to have a decentralized organization that allows specific actions to be taken in a trustless manner. And by the way, if anybody

+[12:00] has any questions, I'm not sure I see if there is anything in the chat so far, but feel free to stop me. Yeah, I don't see any questions so far, but anyone who has questions, feel free to ask in the chat. Perfect. And we'll soon drop into a demo as well. So, okay, what is Tansu today? Tansu has so far received two rounds of funding from SDF, which allowed me to build the following: on the left side, a smart contract and a dApp, which are on testnet. I also managed to build a decentralized organization from scratch; I'm not depending on anything. The only thing I depend on, which is a great project, is Soroban Domains. That's my way of preventing people from abusing the system a bit: I require people, when they register a project, to have a Soroban Domain for it. This way you have collateral for the project itself, and if people abuse it and create too many projects, we can deal with

+[13:00] that via Soroban Domains. Then I added some features to start giving money to projects; this is the support feature. On the security side, I have the commit hash on chain, which I will show later. What I'm working on right now: I actually have an open pull request to add anonymous voting to the system. I believe that would be the first anonymous voting platform on Stellar, so I'm pretty excited about that. Everything's working on the smart contract side; right now I'm just reworking the dApp itself a bit to make it work, so I really hope to have that out in one or two weeks. As I said, I'm not a front end developer; I do have some help from a front end dev, and because I have the day job, it's a bit hectic, and sometimes I have to work some nights to push things through. But it's getting there. I have another friend, actually, who's

+[14:00] helping me on the next point: critical vulnerability analysis and things like that. We want to push all this information on chain and link it with the state of the project. The same goes for the release artifacts. So the focus right now is adding more features towards the decentralized organization part and the security part. That's just a quick roadmap that I have in mind. As I said, I had an SCF award, which allowed me to kickstart all of that. I built all of that in one quarter, basically, to have the first proof of concept and to be on testnet.
So it was pretty fast, because I had a really clear idea of what I wanted to do. Now I'm in the SCF Build track: I already finished the first tranche, I'm on the second tranche, and I'm beginning to collaborate with another great project, Trustful, to add a layer on the

+[15:00] decentralization part. I actually don't want to use a token for the organization; I want to use a trust-based system. It's a bit similar to how nodes talk to each other in the Stellar consensus protocol: we use trust, and here I also want to use the trust that we place in people, in maintainers, and in participants in the community. So the more active you are in the community, and the more good social impact you make in these communities, the more badges you get (this is the concept that Trustful is bringing), and with this you get your voting power. I prefer that over having just a monetary incentive, because money is lacking; I prefer the money to go to the support of the project. There's a question: Launchtube to sponsor transactions? I actually have to look into Launchtube, because, to be completely honest, I still don't really understand what

+[16:00] Launchtube is fully doing. But I definitely see a lot of people talking about it, and I hop on the calls sometimes to see what Tyler is building, so I will definitely have to look into that to pay transaction fees for these interactions. Yes. For now, I don't put any premium on any features, but that's something I will need to think about if one day I really want to make this profitable in some way. Right now there's nothing like that, and you can see the code; everything is open source. Reading the chat: paymaster, sponsor. Yeah. Right now there are zero fees, and the only way Tansu would make any money right now is that when you click on the support button, which I will show, you can decide to leave a tip to the platform. Right now there is just that. And that brings me to the presentation of the dApp itself, if I manage to present the deck and

+[17:00] share my screen. This one. Can I share? I can share it, yes, perfect. So the main website of Tansu is just tansu.dev. Here you can find some rough documentation. I'm saying rough because I'm a backend dev, so my tendency is to write code and forget the documentation a bit. But there's still some documentation to go through: how to create your new project, explaining a bit of the rationale behind the Soroban Domain. I also have the concept of a tansu file to describe your project a bit. Basically, it's a way to map your GitHub profile to your Stellar address; that's one of the main features. The other feature is just

+[18:00] to have some social links attached to your project, because I don't want to store all of that on a smart contract. Something I have to mention is that I'm trying everything I can to not have a backend at all, because I really believe in decentralization, and I think that's really what we're trying to do here in blockchain in general. To me, using a backend and having something hosted on GCP or AWS would somewhat defeat the purpose of everything we're trying to do. So I'm trying to have everything running locally on the client side, and if not,
then it's running on the contract. So there's no database or anything like that, and I'm really trying everything I can to avoid all of that. I'd even think about using a distributed database via IPFS and things like that, if I ever have the need for a database. There's also a developer section discussing the architecture a bit, things which are also in

+[19:00] the SCF proposal that you can have a look at, then the contract address, some links, and the app itself. So I can jump directly into the app. So yeah, that's the main app. If you've been in the app before, you will notice that it's really different now, because I merged this revamp of the UI just two days ago. I'm really happy with the result. I had a designer team working on it for two months; thanks to the SCF funding, I was able to do that, the logo and the whole look and feel. It really made it possible. How the app works is that you have your project. You can search for a project; it's a normal search. So "tansu": I have Tansu. If it doesn't find anything, it's going to tell me there is nothing on chain, and then I can create a new project.

+[20:00] I'm not going to do that here; I'll just go to an existing project, Tansu. Here you have a bit of detail, and then you look at the project itself. So I registered Tansu with Tansu; it can be a bit confusing, sorry, because I wanted to manage my own project with my own tool, basically. So, what is the project page? I can quickly go over it. You have some quick information here. You have this famous support button, which allows you to give some tips to the tools that you use, if you like what they're doing. As I said, that's the only way the Tansu protocol receives any funding right now: by entering an amount here. Quickly, on the wallet side, because there was something to sign here: I'm using the Stellar Wallet Kit. So this is

+[21:00] on. Someone in chat suggests another protocol to look into. Interesting, okay, thank you. I will have a look; let me copy that. All right, copied. Thanks, Matias. So here is the main information, and the piece of information that is actually stored on chain is this thing, the last hash. Here you see I have a bug, apparently, because you don't see the hash, but you see it represented here. You have all the hashes here, and you can actually go on chain and see the project itself. Let me quickly go there. Here's the contract itself, and every time you do something, you would see it here, and here are all the

+[22:00] entries. So how is the project defined? Let me try to find it. Here's the project and how it's defined. That's basically the information stored on chain: the project name; some config, that is, who the maintainers of the project are, so who really has access to it; and after that, some other metadata associated with it. The main metadata right now is the last state of the project on GitHub, which is a commit hash. This is stored in another entry.
So this, yes, exactly: this key for Tansu here is the same as the last hash here. That's the last hash registered right now, and it corresponds to a commit that exists. If I go back: when there is actually a match between the on-chain data

+[23:00] and the data that you find on GitHub, it indicates that everything is fine; you have a green tick. If somebody were to compromise GitHub and overwrite the commits there, this would turn red, and in the future I will add triggers that send notifications to people to say, hey, something happened on GitHub versus what's on chain. So that's one of the main security features that I have right now. The same goes for the configuration, who is a maintainer and who is not: this is checked against the tansu TOML file. I'm comparing the data in this file with the data on chain to have this sort of check. So here you have the commit, this commit that we just saw, this f72..., which is verified, and because I defined some maintainers, I'm a verified maintainer in this list. So you have

+[24:00] some indication here of what's on chain, and you can verify it. So that's the first part, the security part. The most important part for me is really the governance part. If I go to governance, that's really your DAO. There is not one DAO for all of Tansu; every single project gets its own DAO, so this page will look different for everybody. Maybe, if I go back to the explorer itself, I can quickly show it. Here you have the DAO of Tansu, and here you have the list of proposals. You can directly see who voted, who approved or abstained, what the status of the vote is, and the title. And here you have IPFS, because I don't store the proposals

+[25:00] themselves on chain; I use IPFS. Once again I see some questions. Details from Gitcoin? Yeah, I'll look into Gitcoin. Actually, if you go to my proposals for SCF, you will find the technical documentation, and at the end of it I had a look at everything out there around decentralization and all of these concepts, and I made a comparative analysis of what I think was missing or was good, and I'm trying to combine all the nice concepts there. What is the actual data being stored on chain for the project; worried about state expiration? For the data on chain, I hope I'm answering the questions right now; and regarding state expiration, that's something I'm actually still thinking about, exactly how it should work and how I should extend all the TTLs, because I'm not sure yet. So for now I'm

+[26:00] not really dealing with it, because I'm still on testnet, so I still have the freedom to do that. But that's definitely something I'm looking into. I have an open issue on that too, but I still need to take more time to think about how the workflow is going to work. How should I pronounce your Discord name? Tupui; it's my second name, actually. I asked the same question before we joined. Yeah, it's a classic question. All right.
So, going back to the proposals themselves, let me just open a proposal to see how it looks. This data, as I just showed, is stored on IPFS, so what you just saw was loaded from IPFS. As I said, there's no backend or anything; everything runs client-side. That's the proposal itself. When you create a proposal, you can put in an image, you can put in whatever you want; I have

+[27:00] a markdown editor, so you can put in tables and all of that, and then it all goes to IPFS. I can actually open the link and show how it looks. Yeah, it's loading. It's funny; it's loading faster on my side than on the IPFS gateway itself. Weren't you in Austria? Yeah, I'm living in Austria, in Vienna, but I'm French, coming from Tahiti, if you want this to be even more confusing. Okay, the IPFS explorer... I don't know what is happening, but you see the link here; it's an IPFS link. It's not loading for some reason, and I mean, this is not my internet. Let me start again. No, I mean, the link is there. I don't know. IPFS. But it's

+[28:00] loading here, so you have it here. One concept I have with the DAO proposals is the concept of an outcome. When a proposal is accepted, it can execute something: when you execute the proposal itself and have the tally calculated, you're able to say, okay, if it's approved, please execute that transaction. Here you have an example of an XDR that I put in; it's a trivial example, but you can put in any XDR when you create a proposal. When you execute the proposal, you get a screen telling you what is going to be executed; the tallying is done, and this transaction is also sent. So what can you use that for? You can use this,

+[29:00] for instance, not just to pay people but, for me, to do special actions for maintainers. For instance, if you wanted to change the structure of your governance, you could have a proposal saying, "What about we add, I don't know, Pamphile to this other community?" So you would make a proposal, and the XDR itself would just call the Tansu contract to add me to another project; when people vote and you execute that, I would automatically be added to that new organization. This way you can really manage your whole governance, or at least the on-chain aspects of your project, on chain as well, and you can also have fund distributions and all of that. What I want afterwards (right now it's very manual; you have to create your own XDR and paste it in) is templates that people can use. They would be

+[30:00] able to have, I don't know, a dropdown, then select "I want to add a maintainer," boom, fill in a few fields, and boom, it pre-builds the XDR for you. I also want to support things that you pre-sign, so that everything can be executed automatically, and things like that. So this is coming, but the main feature right now is that
you have this XDR that you can execute, and I think that's basically it. If you have any questions... I don't know if there were any other questions. I think you got through all of them. Perfect. Yeah, super exciting to see it. I've been poking around a little bit just to play with it, and I think it looks great. Thank you. It's a great project, and I love to see how much you've decentralized the dApp as well. So,

+[31:00] yeah, a really interesting project. Did you have anything else in the presentation, the other presentation? No, that was it. I mean, my next big thing is really to bring anonymous voting there; this I'm really excited about. I've been working a lot on that, and learning as well: I knew nothing about zero knowledge and homomorphic encryption and all of that. I just got the idea when I saw that the Protocol 22 update was bringing that, and then I started to look into it, and I really like the concept. I think that feature would bring quite some value to the project. I have an open pull request; everybody can check out the code, and please feel free to also use what I've discovered for your own project.

+[32:00] Okay. Yes, Matias is asking: using NQG for the voting? Yes, sort of, with these badges that I want to use. Trustful, the project that I want to use, has this concept of badges, and I want to have some neurons: how many badges do you have, and what kind of badges do you have? Maybe on top of that, things like how many times we can find your name in the history of GitHub. This would maybe give you power over the actions that you take on chain, to say, oh, you're an active maintainer. You could also have a cron job running in the background saying, if you've not been active code-wise for too long, then maybe we remove your code access, and things like that. I'm really thinking about all these kinds of things. On Polkadot? No, I did not know

+[33:00] about that. That's great; that's why I like Stellar so much: so many people giving you ideas like that and sharing interesting things. Thanks. Now I feel compelled to say: please do have a look. Everything is open, and right now it's very simple. I have a monorepo, because I'm basically working on it by myself, and it's on my GitHub profile; the repo is called soroban-versioning. Yeah, it's great. I love that you bring a lot of new ideas to the table here. Some of the solutions you're working on are things we probably haven't seen, or haven't seen a lot of, in the community. So I hope this is an inspiration for everyone else to go and take a look at your project, take a look at

+[34:00] the code, and see how you're solving some of these challenges, creating creative solutions to sometimes well-known problems, or to new problems, new ways of doing things. It was really great to get the opportunity to chat with you and to see the project you're working on. Thank you so much for joining. Thank you very much for the invite, and thanks to the audience for all the nice questions. Okay, great.
I think we'll wrap it up here, then. We will be back next Thursday, and if anyone out there has a cool project that they would like to talk about here on the Stellar Developer Meeting, please reach out to me. I think it's very interesting to see some of these projects, talk to the developers behind them, and hear their thinking about how they solve these problems. So thank you, everyone, for joining. I'll see

+[35:00] you next week. Thank you. Thanks, everybody. Bye. +
diff --git a/meetings/2025-04-17.mdx b/meetings/2025-04-17.mdx new file mode 100644 index 0000000000..b67c119dbd --- /dev/null +++ b/meetings/2025-04-17.mdx @@ -0,0 +1,70 @@
+---
+title: "Intro to Quickstart"
+description: "An introduction to Stellar Quickstart as a local development network, showing how to run Stellar Core, Horizon, and Soroban RPC locally and deploy and test smart contracts in a controlled environment."
+authors: [carsten-jacobsen]
+tags: [developer]
+---
+
+import YouTube from "@site/src/components/YouTube";
+
+
+
+This meeting dives into one of the most overlooked tools: Quickstart, a local Stellar network environment designed to help developers build, test, and experiment without relying on public networks. The session explains what Quickstart is, why it’s useful, and how it mirrors real network behavior while running entirely on a developer’s machine.
+
+The walkthrough shows how to install and run Quickstart using the Stellar CLI and Docker, deploy a sample “Hello World” Soroban contract, and interact with it locally. It also demonstrates how Quickstart integrates with existing tools like Stellar Lab, making it easy to switch between public networks and a local setup.
+
+### Key Topics
+
+- What Stellar Quickstart is:
+  - A Docker-based local Stellar network
+  - Bundles Stellar Core, Horizon, Soroban RPC, Friendbot, and a database
+- Supported network modes:
+  - Ephemeral mode (fresh state on each startup)
+  - Persistent mode (state and config saved via local volumes)
+- Running Quickstart locally:
+  - Requirements: Stellar CLI and Docker
+  - Starting a local network with `stellar network container start` (see the CLI sketch after the resources list below)
+  - Configuring Quickstart as testnet, mainnet, Futurenet, or a custom network
+- Developing with Quickstart:
+  - Building and deploying a Soroban “Hello World” contract locally
+  - Using a local RPC URL (`localhost`) instead of public networks
+  - Invoking contracts exactly as on testnet or mainnet
+- Using Stellar Lab with Quickstart:
+  - Connecting Lab to a custom network
+  - Generating keypairs and funding accounts via local Friendbot
+  - Same workflows as public networks, but fully local
+- Documentation and advanced usage:
+  - Network configuration options
+  - Debugging and advanced settings
+  - Logging and testing scenarios not practical on public networks
+
+### Resources
+
+- [Stellar Quickstart](/docs/tools/quickstart)
+
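Pieced together from the walkthrough below, the whole flow fits in a handful of CLI commands. This is a sketch rather than a canonical recipe: flag spellings vary a bit between Stellar CLI versions, the wasm path assumes the standard Hello World template, and `<CONTRACT_ID>` and the `frank` identity are placeholders.

```bash
# Start a local Quickstart container configured as testnet
# (serves Horizon and RPC on port 8000).
stellar network container start testnet

# Create an identity to deploy from (fund it via the local Friendbot;
# on some CLI versions this needs an explicit --fund flag).
stellar keys generate frank \
  --rpc-url http://localhost:8000/rpc \
  --network-passphrase "Test SDF Network ; September 2015"

# Build the Hello World contract, then deploy it to the local network
# by pointing at the local RPC URL instead of the public testnet.
stellar contract build
stellar contract deploy \
  --wasm target/wasm32-unknown-unknown/release/hello_world.wasm \
  --source frank \
  --rpc-url http://localhost:8000/rpc \
  --network-passphrase "Test SDF Network ; September 2015"

# Invoke it exactly as you would on testnet or mainnet, using the
# contract ID printed by the deploy command.
stellar contract invoke \
  --id <CONTRACT_ID> \
  --source frank \
  --rpc-url http://localhost:8000/rpc \
  --network-passphrase "Test SDF Network ; September 2015" \
  -- hello --to Frank
```

The only difference from a testnet deployment is the `--rpc-url` pointing at `localhost:8000`; as noted in the recording, the network passphrase is the same one the public testnet uses.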
+ Video Transcript +

+[00:00] Hi, and welcome to this week's Stellar Developer Meeting. Today I'm going to talk about Quickstart. So what is Quickstart? Well, Quickstart is a local Stellar network environment. Quickstart is a Docker image that bundles Stellar Core with Horizon, Soroban RPC, Friendbot, and a database, and it can run locally as either mainnet, testnet, Futurenet, or a customizable local network. Quickstart is great for developers who want to test smart contracts, experiment with the Stellar network, and learn about Stellar's features in a controlled environment. And I can tell you that SDF engineers use Quickstart quite a bit. There are two different operational modes. There's ephemeral mode, which cleans the database and starts up with a default config at each startup, and then

+[01:00] there's persistent mode, which mounts a volume on your local machine and persists the database and the configuration. It's very easy to set up Quickstart. All you need is the Stellar CLI and Docker installed on your local machine, and then you can use the CLI command `stellar network container start` to set up Quickstart. In this case, we use the command to set it up as testnet, and when Quickstart is set up, you can access testnet with these settings. After this presentation, I'm going to give a quick intro to how to actually use it in practice. Two ways to use it are to connect Stellar Lab to Quickstart, or to deploy a smart contract just like

+[02:00] you're used to. First, in this case using the Hello World example, you set up the smart contract, you create an identity, and you build the contract. Then you deploy the smart contract, but instead of deploying it to testnet, mainnet, or Futurenet, you deploy it to your local Quickstart environment. In this case, we use localhost as the RPC URL to tell the `stellar contract deploy` command that we want to deploy onto Quickstart. And once deployed, we can invoke the smart contract just like we're used to when we have deployed it to testnet or mainnet. I want to put a little bit of focus on the documentation here. If you want to know in more detail how Quickstart works and how to set it up, you can go to our documentation, go to Tools, and go to Quickstart.

+[03:00] Here you'll find all the relevant information you will need. You can read about how to get it set up, the network modes, how to work with the Friendbot faucet, how to debug, and some advanced settings that I'm not going to cover in this presentation. But let's try to set it up on my local machine. Okay, so here we have the terminal. First, if you remember, we can use a command from the Stellar CLI to set up our Quickstart, and in this case I

+[04:00] want to use testnet, and I'll just give this Quickstart container a name. And that was it; now Quickstart is up and running on my local system. So let's go ahead and build a smart contract. We'll use the Hello World example. Then we need a user; let's use a name I haven't used before and call it Frank. And now

+[05:00] let's build the contract. Okay, the Hello World smart contract is now built, and now we want to deploy it onto Quickstart. As mentioned before, we can here

+[06:00] see that instead of deploying it to the actual testnet or mainnet, we specify the RPC URL here, using localhost with /rpc, and the rest is the same as
when we deploy to testnet. Okay, so now we actually have the Hello World smart contract deployed to Quickstart. Next, we want to try to invoke it, using the contract ID we were given here,

+[07:00] and it was successful. So as you can see, once we have set up Quickstart, we can deploy our smart contracts to it and run them locally instead of on the public testnet or mainnet, and it works exactly the same way. Now let's try to connect Stellar Lab. Instead of using the terminal, I have Stellar Lab in this window, and

+[08:00] the way you connect it to your Quickstart is to go up to the network selector here and, instead of testnet, choose Custom. I have already tried this, so these fields are already populated, but the Quickstart URLs are localhost on port 8000 with /rpc for the RPC endpoint, and Horizon is just localhost on port 8000. The network passphrase is the same as if we were using the public testnet. So let's switch to the custom network. Now Stellar Lab is connected to our Quickstart, and we can do everything we usually do when we're connected to testnet, mainnet, or Futurenet. Let's just try to generate a keypair, and we can fund it with

+[09:00] Friendbot. This works exactly like if you were connected to testnet. One thing to note is that usually, if you use testnet, you will get a link to StellarExpert, so you can go and take a look at your account there. Obviously we can't do that here, because StellarExpert is not looking at our local Quickstart, but otherwise it works exactly like working on the public testnet. I hope this little introduction to Quickstart was useful. If you have any questions, feel free to ask; you can ask in Discord, and I know there are a lot of Quickstart users there as well. Hopefully this gives you a great tool to work with for those special requirements, maybe where you want to use some logging;

+[10:00] there are some great logging features in Quickstart. So go explore it, take a look at the documentation, and see how it can be useful for you. Thank you for joining. +
diff --git a/meetings/2025-05-01.mdx b/meetings/2025-05-01.mdx new file mode 100644 index 0000000000..15880c2aaf --- /dev/null +++ b/meetings/2025-05-01.mdx @@ -0,0 +1,151 @@
+---
+title: "Protocol 23 CAP Follow-up and Q&A"
+description: "A Protocol 23 readiness check covering final questions on upcoming CAPs, with focus on CAP-70 consensus timing configurability and follow-ups on CAP-67 unified events and fee refund event semantics."
+authors:
+  - carsten-jacobsen
+  - dmytro-kozhevin
+  - garand-tyson
+  - leigh-mcculloch
+  - nicolas-barry
+tags:
+  - developer
+  - CAP-62
+  - CAP-63
+  - CAP-65
+  - CAP-66
+  - CAP-67
+  - CAP-68
+  - CAP-69
+  - CAP-70
+---
+
+import YouTube from "@site/src/components/YouTube";
+
+
+
+This protocol meeting serves as a final community Q&A before the Core CAP team vote on the Protocol 23 (Whisk) CAP set. The group reviews overall readiness, notes which CAPs are unchanged since prior discussions, and spends most of the time on newer or still-active items.
+
+The primary deep-dive is CAP-70, which introduces configurable consensus timing parameters (without changing defaults yet). The remainder of the session recaps the broader Protocol 23 CAP bundle, with updates and open questions around unified events (CAP-67) and a short discussion on an executable-address getter proposal (CAP-69).
+
+### Key Topics
+
+- Protocol 23 status:
+  - Implementation is nearing completion for Whisk and the Protocol 23 CAP bundle
+  - Meeting goal: final recap and answering community questions before Core CAP team voting
+- CAP-70 consensus timing configurability:
+  - Make previously hard-coded timing values configurable (e.g., ledger close time and round timeouts)
+  - No behavior change on activation; defaults match current hard-coded values
+  - Motivation: enable small incremental tuning toward lower ledger close times over future upgrades
+  - Risk discussion:
+    - Misconfiguration could destabilize consensus if set to extreme values
+    - Suggested mitigation: add tight implementation-level bounds to prevent unsafe ranges (a bounds-check sketch follows the resources list below)
+    - Consideration of absolute bounds vs relative “percent change” limits, including disaster-recovery tradeoffs
+    - Need for supercluster testing and careful rollout practices before any future parameter changes
+- CAP bundle recap (no major semantic changes noted):
+  - CAP-65 module cache, CAP-62/66 archival + in-memory resources, CAP-63 scheduling: progressing largely as previously discussed
+  - CAP-68 noted as potentially optional depending on remaining concerns
+- CAP-67 unified events follow-ups:
+  - Remove muxed source support for classic events to avoid inconsistencies and unclear use cases
+  - Ongoing open item: how to represent fee charging vs refund timing in events
+  - Direction: keep events practical for common balance-tracking use cases; accept that some edge cases may require TX meta
+- CAP-69 executable-address getter discussion:
+  - Debate on whether exposing “is this address executable?” could encourage harmful patterns (ecosystem-specific branching)
+  - Counterpoint: similar checks can be approximated today; a standardized API could be safer and less hacky
+  - General agreement that the proposed API is reasonable, with caution around transitive dependency pinning and interoperability
+
+### Resources
+
+- [CAP-0062](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) — [Discussion](https://github.com/orgs/stellar/discussions/1575)
+- [CAP-0063](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0063.md) — 
[Discussion](https://github.com/orgs/stellar/discussions/1602) +- [CAP-0065](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md) — [Discussion](https://github.com/orgs/stellar/discussions/1615) +- [CAP-0066](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) — [Discussion](https://github.com/orgs/stellar/discussions/1585) +- [CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) — [Discussion](https://github.com/orgs/stellar/discussions/1553) +- [CAP-0068](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0068.md) — [Discussion](https://github.com/orgs/stellar/discussions/1626) +- [CAP-0069](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0069.md) — [Discussion](https://github.com/orgs/stellar/discussions/1633) +- [CAP-0070](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0070.md) — [Discussion](https://github.com/orgs/stellar/discussions/1719) + +
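For a concrete sense of the "tight implementation-level bounds" mitigation discussed in the meeting, here is an illustrative sketch. stellar-core is C++ and CAP-70's real validation logic is not shown here; the struct, constants, and bound values below are invented purely to show the shape of the idea.

```rust
/// Consensus timing values proposed in a network config upgrade,
/// in milliseconds. Names are hypothetical, not CAP-70's XDR.
#[derive(Debug, Clone, Copy)]
struct ConsensusTiming {
    ledger_close_ms: u64,
    nomination_timeout_ms: u64,
    ballot_timeout_ms: u64,
}

// Hypothetical per-protocol envelopes: a Protocol 23 build would refuse
// to vote for anything outside them, and later protocols could widen them.
const LEDGER_CLOSE_BOUNDS_MS: (u64, u64) = (4_000, 10_000);
const NOMINATION_TIMEOUT_BOUNDS_MS: (u64, u64) = (750, 5_000);
const BALLOT_TIMEOUT_BOUNDS_MS: (u64, u64) = (750, 5_000);

fn in_bounds(value: u64, (lo, hi): (u64, u64)) -> bool {
    (lo..=hi).contains(&value)
}

/// Accept an upgrade only if every proposed value sits inside its
/// envelope, so a fat-fingered 50 ms ledger close time can never
/// reach consensus.
fn upgrade_is_valid(proposed: &ConsensusTiming) -> bool {
    in_bounds(proposed.ledger_close_ms, LEDGER_CLOSE_BOUNDS_MS)
        && in_bounds(proposed.nomination_timeout_ms, NOMINATION_TIMEOUT_BOUNDS_MS)
        && in_bounds(proposed.ballot_timeout_ms, BALLOT_TIMEOUT_BOUNDS_MS)
}

fn main() {
    // Today's defaults (5 s ledgers, 1 s timeouts) validate unchanged...
    let current = ConsensusTiming {
        ledger_close_ms: 5_000,
        nomination_timeout_ms: 1_000,
        ballot_timeout_ms: 1_000,
    };
    assert!(upgrade_is_valid(&current));

    // ...while an extreme proposal is rejected outright.
    let reckless = ConsensusTiming { ledger_close_ms: 50, ..current };
    assert!(!upgrade_is_valid(&reckless));
}
```

Relative ("percent change") limits could be layered on top, but as the discussion below notes, they complicate disaster recovery, which is why absolute per-protocol bounds come up as the simpler first step.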
+ Video Transcript +

+[00:00] Great. Hi everyone. I think we have a bit of an issue here with our agenda in terms of who is supposed to speak. I guess the purpose of today's meeting was just to revisit the CAPs that we have before the final approval, and the agenda suggests that the CAP authors would present them. I think almost all the CAPs have been discussed before, besides [CAP-70](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0070.md). So maybe, if Garand is here, you could talk about [CAP-70](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0070.md), because it's completely new, and then we could quickly recap the other CAPs

+[01:00] and see if there are any questions concerning any of them. Cool. Yeah, that sounds good. So I guess, getting started with [CAP-70](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0070.md): this one is a recent addition to Protocol 23, but it's fairly small and straightforward. Essentially, what this does is introduce a couple of new network config settings that control some of the timing parameters around consensus. Today, during consensus, there are a couple of hard-coded values, such as the length of the ledger being 5 seconds, as well as some of the more nitty-gritty details of consensus, like how long nodes wait for certain rounds, timeouts between certain consensus rounds, and things like this. This CAP doesn't actually change anything; it just allows us to change these values in the future. So, for instance, today all the

+[02:00] Stellar Core nodes are hardcoded to have a 5-second ledger time, but with this CAP, that becomes dynamic and configurable. The motivation behind this is that, as we want to move towards higher throughput and lower block times, making large jumps is difficult: going from 5-second blocks to 2.5-second blocks is very challenging. The goal is to allow us to make small incremental changes via network upgrades, where, to get to that goal of 2.5 seconds or whatever it may be, we start by just trimming off 100 or 200 milliseconds at a time, and over time we'll get there. So this CAP introduces all these new config settings, but the default values are the exact same values that are currently hardcoded into Core. [CAP-70](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0070.md) will have no noticeable change when it's initially implemented; it just opens the door to more configurability and, hopefully, tighter iteration cycles as we go to

+[03:00] improve consensus and nomination timing in the future. So I think it's pretty straightforward and probably not super controversial, but if anyone has any questions or wants to talk about it, feel free. Yes, I have a small question, which I kind of already asked in the discussion. For the Soroban network settings, it is relatively easy to understand what every setting means: if you're, for example, increasing the instructions limit 100x, it is easy to spot that it's probably not going to work, because we would take 100 times longer to close the ledger. So my question here is: how risky is this set of parameters? Is it possible to accidentally vote

+[04:00] for an upgrade that breaks the network in an unintuitive way?
Because we're kind of altering the consensus algorithm, which itself is used to do everything on the network, including upgrades, so un-breaking the network would be tricky. So, how risky is it to break the network? Yeah, I do agree with you: these are pretty low-level technical settings. The reason it makes sense, from a technical standpoint, for them to be config settings is that these are things we want to change often, and slowly over time, to make small incremental progress, and all the nodes have to agree on them; so it makes sense for them to be config settings. But they definitely are difficult to understand. As far as validation goes, pretty much the only tool we have is supercluster: simulating the network and seeing the effect these values will have. And they definitely have effects that are sometimes not

+[05:00] super intuitive. So I think what we might want to do, especially when we first launch these features, is put tight bounds on the acceptable values. For instance, if today the default value for the ledger close time is 5,000 milliseconds (5 seconds) and you were to change that to 50 milliseconds, bad things would happen and the network would be very unstable. Given that, at least in my thinking, the goal of this CAP is to make small incremental improvements, we could have very tight bounds at the implementation level. So, say, in Protocol 23 we could say that the lower bound on the ledger close time is 4 seconds, or maybe 3 and a half, something like that. Worst case, if someone

+[06:00] were to propose a 50-millisecond change, it just wouldn't go through. Even these lower bounds would be kind of guesses at this point, because there's still lots of testing and validity checking that needs to be done on these values. If you go from 5 seconds to 4 seconds, that might still cause some network instability, but I don't think you would break the network the way you would going from 5 seconds to 50 milliseconds. So would something like that, some tighter bounds, alleviate some of your concerns? Yeah, that makes sense. I still feel it might be a bit scary, but I guess we can try to make sure. Maybe one thing I thought of while you were talking is that we could limit the upgrade not only in an absolute sense but also in a relative sense: you cannot reduce the current value by more than

+[07:00] x%, so that we don't need to guess too hard; it just enforces incrementality. Yeah, that's a good idea. I think relative limits might have some potential issues, though; I like bounded limits a little better than relative changes. You might be in a scenario where, for some reason, the network is broken, either because you've changed limits in some way, or because something external to these parameters has gone wrong that also needs fixing, and I could see a possibility where, due to the network being in a bad state, it would be advantageous to make large changes to these values. So I don't know
if the relative sort of

+[08:00] safety measure is as safe as it sounds from a disaster-recovery standpoint. I think bounded limits, at a minimum, would be good. And even for the relative limitations, the issue is that for some of these values you can do damage going in both directions: increasing the nomination timeout may improve performance in one case and hurt it in another. What I mean is that, with these parameters, it's not that the network simply gets more unstable if you go in one direction and more stable if you go in the other. So having relative bounds might be a little tricky; I agree that having bounds in general, absolute bounds, is probably what we want. I think we may want to have

+[09:00] those be more like some sort of config settings, because, as the goal of this is to eventually move those values on the public network while keeping the same defaults, on the public network it would take many things being overridden for a bad vote to be accepted. But in another environment, like testnet or a test cluster, maybe you actually want to see what happens if you set those things to half a second or whatever. So I think that's the type of thing we should do. The thing I wanted to add to this CAP, though, is that it's only the first building block, the first change, I think, that

+[10:00] is needed to really move on the latency front. I can see certain things that are going to be impacted if this runtime gets too small, because today a lot of things on the network are tied to this 5 seconds. The things I can think of are that we take snapshots every 64 ledgers, and how fast we spill from one level to another; those things may need to change as the block time goes down, and right now they are not configurable. Of course, changing the speed of spilling is a much more involved change than changing the close time, but that's, I

+[11:00] think, something that maybe we'll have to think about. Yeah, and I think that's one of the things that makes this complex and interesting: lowering the block time is bound to break something. We can test Stellar Core in simulation environments and such, but even if the actual layer one doesn't break, downstream something will break somewhere for sure. Hopefully, if you shave 100 milliseconds off the time, you have one thing breaking at a time, in minor ways, whereas if you went from 5 seconds to 2.5, I'd imagine many things would break all at once, which would be sad. I don't have more questions; the only comment I have is that, again, this is a similar property for all the settings, but since there is no protocol version bump attached, that means the network may

+[12:00] run on different versions of the Core software, which means the plan of doing an upgrade based on some minor release changes may not necessarily be sustainable, because it's kind of tricky to get everyone on the same version of the software. I guess we can try to make it happen.
But still, I imagine major changes in this direction would need to be tied to major releases, probably. So I understand it's a config setting; it's just an observation that, even though it's a config setting, we might not be able to change it too often. Yeah, and I think that's why I'm kind of in favor of runtime bound limits. I think Nico might have been hinting at having a config setting that's the actual value, and then an additional config setting for the bound.

+[13:00] But I think, to Dima's point, for a Protocol 23 package it should be known that the 23.0 package can probably only run within these timing envelopes. And so what's interesting is that, even though technically it's a config setting, the bounds are part of the runtime; they can even be part of the protocol at this point. Say the lower bound is 4 seconds in Protocol 23, and then the lower bound in Protocol 24 could be 3 seconds; you don't change the actual value on the P23-to-P24 upgrade, but you change how low the value can be. I think that might be the best way to think about these settings: the bounds can change as our latency work gets better, but the actual value depends on the network config settings. Yeah, and I guess, to that point,

+[14:00] since bounds affect upgrade validity, I'm actually not sure if this can make some nodes disagree on upgrade validity. Anyway, that's not super important, but it's something to consider. I don't think this really needs to be part of the protocol; I think this can be a safety thing where there's a contract that the upper and lower bounds are just constants in the C++ code, similar to the upper and lower bounds for other config settings, and there's an agreed-upon contract that you change these bounds on protocol boundaries. I guess there's a question in chat: do we run the changes on the supercluster and then give the validators input on any hardware or VM upgrades in a timely manner? I can answer: I think the answer is

+[15:00] yes. We're doing lots of supercluster testing, and part of this work, too, is that I've been running some one-off tests myself of late, and I'm not sure the default values on mainnet today are actually the best values for the network as it exists. In particular, the 1-second nomination timeout and the 1-second ballot timeout are kind of hard-coded magic constants that have been in Core forever and that we haven't really questioned. So part of this, while not part of the actual CAP itself, is, after we've released 23, doing testing both in supercluster and in testnets and finding out what the true values should be. Ledger close time, I think, is pretty set at 5 seconds, but for the other values there probably is some experimentation to figure out what the correct value should be in Protocol 23, and then, of course, in future

+[16:00] protocols, figuring out the correct values there as well. One more thing I want to add:
the intention is not, to answer that question more specifically, to use these config parameters to force hardware changes, or to force validators to upgrade to beefier machines. That's a different conversation. So we'll definitely test, and the goal is not to use these parameters to require you to buy more expensive boxes. Right. And I guess, if there are no more questions on this topic, we can do a quick round-up of the proposals for this protocol; that's what this meeting is supposed to be about. Yeah. Maybe can I suggest not spending a whole lot of time on each CAP? If there are CAPs

+[17:00] that had some significant changes in the last month, maybe we can briefly talk about those, but otherwise... Yeah, exactly; that was my plan. What I wanted to start with is quickly going over the CAPs that haven't changed; the only action item that remains for them is to approve them, I guess. So, in no particular order: [CAP-65](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0065.md), the reusable module cache, was finalized a while ago and has been implemented and merged. No changes there. Then [CAP-62](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) and 66, about state

+[18:00] archival and in-memory resources, are still in implementation progress, but there were no changes to the CAP semantics, so those stay as they are. [CAP-63](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0063.md), parallelism-friendly transaction scheduling: again, no changes. The protocol part of the implementation work is done for the most part; the actual parallel execution is work in progress, but again, no substantial changes

+[19:00] there. [CAP-70](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0070.md) has just been discussed. [CAP-69](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0069.md), new host functions, hasn't been implemented yet, but there were no new suggestions there. What remain are [CAP-68](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0068.md) and 67. For [CAP-68](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0068.md), the CAP about a host function for getting the executable for a contract address, there weren't any changes to the CAP itself, but we had some concerns about the necessity of this function. Is Leigh here? He might not be here. We could maybe work it out during this meeting, but since he doesn't seem to be here, I guess we'll take it offline. In

+[20:00] any case, I don't think there will be changes to this CAP; the worst case is that we just don't do it, if we decide it is not necessary. And what remains is [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md), which is the unified asset events. There has been a lot of discussion about this CAP, and there were changes to it recently, basically ironing out some edge cases. Specifically, working backwards from the history, what we updated is that we completely removed the muxed source ID from all the classic events.
So for some context:

[21:00] originally, CAP-67 proposed this for the transfer events emitted by the classic operations and by Soroban token transfers, starting from Protocol 23. Initially there was a proposal to also allow multiplexing for the transfer source. But it leads to a number of inconsistencies and is generally not something widely used; basically it's a transfer where you want to know that some virtual account initiated it, even though the underlying account still has to sign for it, which is kind of a weird use case. So the outcome of the discussion of all the edge cases this comes with was: let's not do this for a while, at least until the next

[22:00] protocol, and let's focus on the minimum thing that we definitely know is useful, which is the to-muxed ID. We know this is useful because it's how exchanges typically identify the users behind a custodial account, so they can attribute token deposits to the right owner in their database. So that's one change. Another change, which is actually what keeps this CAP from final approval, is the change to how we emit information about fee refunds. From the previous discussion, the current status is that we will emit two events called fee:

[23:00] the first fee event is what we charge initially, before applying the transaction, and the second is also a fee event but with a negative value, representing a refund. In Soroban a portion of the resource fee is refundable, so whatever portion of the fee hasn't been used is refunded after applying the transaction; that's what we have in the CAP now. What we still haven't converged on is whether we want to add additional attributes to these fee events that tell the consumer of the event when exactly the charge happened. Fees are charged before all the transactions are applied, so it's not part of the typical transaction

[24:00] flow; and as for the refund, before Protocol 23 it is refunded right after the transaction is applied, while starting from Protocol 23 it will be refunded after all the transactions are applied. So there is this annoying inconsistency with all the other events: all the other events happen while a transaction is being applied, whereas fees happen before, and sometimes after, all the transactions are applied. The likely outcome is that we'll alter the XDR slightly to add some information about the timing of the fee, to make this more obvious. But I'm not sure we have fully converged on that, so this is something we'll try to prioritize in order to finalize this CAP. Yeah,

[25:00] I guess nothing major is happening here, but there are lots of small pieces that keep coming up. So that's kind of it; that's the only CAP that is still not fully finalized, and I hope this timing question will be the last small thing we have to clarify. That's pretty much it. Any questions on this CAP, or any other CAPs? Again, as I said before, for the other CAPs there are no changes.

[26:00] I guess there are no questions, so I don't know that there's anything else to discuss. Yeah, I was curious if there was any feedback from anybody on the restrictions that were put on the memo. Okay, I don't see any feedback. My concern generally is that this stuff may not be super discoverable;

[27:00] my suspicion is that if we make some incorrect decisions in terms of the event layout, we'll learn about it only after we have launched CAP-67.
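Stepping back to the two fee events described above, a rough sketch of what consuming them could look like; the event shape here is illustrative, not the CAP-67 XDR:

```typescript
// Illustrative fee-event shape; the real events are XDR structures whose
// exact fields were still being finalized at the time of this meeting.
interface FeeEvent {
  txHash: string;
  amount: bigint; // positive: the up-front charge; negative: the later refund
}

// Net fee paid by a transaction: the up-front charge plus the (negative)
// refund event emitted after the transaction (or, from Protocol 23 on,
// after all transactions) has been applied.
function netFee(events: FeeEvent[], txHash: string): bigint {
  return events
    .filter((e) => e.txHash === txHash)
    .reduce((sum, e) => sum + e.amount, 0n);
}
```

The timing attribute under discussion would let a consumer distinguish the up-front charge from the post-apply refund without relying on the sign convention alone.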
When people start actually consuming the events, they'll find use cases that aren't covered. To be frank, at this point I think we should just converge on something that simply works for 95% of the cases, and if we find issues after it has launched, we can address them in future protocols. The good thing about these events being part of the protocol is that we can issue a quick fix if some information is missing, or unnecessary, or doesn't represent a value the way it should. So it is fixable, and I think [CAP-66](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) and

[28:00] 67 are in reasonable shape now. So we are obviously open to feedback, but as I said, at this point we've gotten into such edge cases that I'm not sure it's clear enough for anyone who is even going to consume these events; I don't think it's clear enough for them yet. But things are happening. Okay, I can see typing. I've been talking a bit about [CAP-69](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0069.md), the getter for the address executable. Yeah, and for [CAP-67](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md), the timing of

[29:00] the fees. I don't think that has been finalized; we need to finalize it outside of this meeting. I think we're almost there. We can just add the timing enum to the transaction events, but we still need to make the respective update and post a discussion thread. So what I wanted to talk about is CAP-69, the getter for the address executable. I know you had some comments on this, and there was some discussion, so if you want to talk about your concerns in general, you could come on the stage.

[30:00] Yeah, I don't want to just rehash what we discussed; I think I was just a little bit fuzzy on it. One second, I'm trying to find the CAP. Sorry, what was the number? It's 69. Oh, sorry. No, that's... yeah, because we split it. That's right, we split it up into

[31:00] two discussions. So here is my point of view on this: I kind of understand some of the concerns, that the use cases for this are, say, not as well defined. But that said, just from the API standpoint, what I'm looking for is whether there are concerns about the API itself, like if someone thinks this is information we should not be exposing at all. If there are concerns like that, they're valid and we could discuss them. But if you're trying to understand the nuances of the use cases, I feel like we can fulfill at least some of the

[32:00] use cases, and we can work on more advanced tools later. Yeah, I think the API that's proposed is very reasonable. The question is what you just highlighted: is there a reason not to expose this information? It reminds me a little bit of when we were looking at what APIs to expose and what ones not to expose.
One that we chose not to expose, that other blockchains and Ethereum do expose, is the concept of msg.sender. If there were a proposal to add msg.sender to Soroban, it would also look sort of similar to this one. It would look very reasonable; it sort of makes sense as a primitive that a contract would have access to. But we made an intentional decision not to

[33:00] include it in the original CAP-46, because we have seen the effect of it in other ecosystems, where it can be used inappropriately, in ways that become a security issue. It provided a view into who's calling you, as opposed to the auth framework we've designed, which is a better, safer way that abstracts the concepts of accounts and contracts. So that's where my concerns are coming from. And when I read through the motivations in here, I think I understand them, but it doesn't completely address those use cases. If you want to pin the exact

[34:00] implementations, yes, it works for direct dependencies, but not for transitive dependencies. So as a primitive, it sounds like it will fit some use cases; it's unclear to me whether it completely solves them. And then I also have this concern of: will this result in patterns of use that harm interoperability? Yeah, I agree on the pinning point. I'm not sure about transitive dependencies, though; I think those are two concerns that are kind of incompatible. Because if we went down the pinning road, and the concern is that the pinning is not strong enough and we should also allow transitive pinning, then we are actually going deeper

[35:00] into the whole no-interoperability problem: not only do you pin the direct call, you also want to pin something that happens as an implementation detail of that call. Honestly, the harming-interoperability piece... sorry, I think there are a few different concerns; that one is not so much about the pinning. It's more to do with making contracts aware of, say, the Stellar Asset Contract versus another token, and then writing behavior that is explicitly different for the two. Yeah. Regarding the asset contract specifically: you can do this right now if you want to; you just need to do a bit more work

[36:00] to do it. So given that, for the SAC versus non-SAC case, I kind of get the concern, but we cannot really hide it either. Oh, you're saying you can already do this today? You can already do this today, because you can go and get the name, encode the asset XDR, and then verify that they match. That's a good point. Right. So basically the trade-off here is not about whether this is faster or not. The trade-off is: if people want to do it, should they keep doing it in a hackier, less efficient way? Whether their use case is good or not, they'll still get themselves into it; it's a choice between them messing it up externally versus us providing something that at least we know works. And yes,

[37:00] you can build something better out of it. But on the other hand, there are different ways of censoring which contracts can interact with your contract.
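For reference, a minimal sketch of the existing check mentioned above, assuming the Stellar Asset Contract's convention that `name()` returns `"native"` for XLM and a `"CODE:ISSUER"` string for issued assets; the helper and types are illustrative:

```typescript
// Compare a token contract's reported name against the asset we expect.
// If they match the SAC naming convention, the contract is likely the SAC
// instance for that asset; a native executable getter would turn this
// heuristic into a direct check.
type Asset = "native" | { code: string; issuer: string };

function nameMatchesAsset(contractName: string, asset: Asset): boolean {
  if (asset === "native") return contractName === "native";
  return contractName === `${asset.code}:${asset.issuer}`;
}
```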
Restricting which contracts can interact with yours is actually something that people have been requesting; this is how things like hard-coding strkey contract IDs might end up working, and people keep coming up with this idea of "what if I want my contract to only interact with, for example, XLM?" So I feel like we're past the point where you could make things truly abstract. I get the concern, but at this point, for the most part, you can already learn this about any contract, and you can at least hard-code contract IDs.

[38:00] Yep. Okay, I don't have anything else to add. I'm not strongly opposed to it; those were just my concerns. I get the benefits, and I think it looks good. Thanks. Sounds good. Yeah, I'll think a bit more about this and maybe double-check with other folks. But honestly, especially for something like transitive dependencies, I think this actually strikes a reasonable balance: you can still achieve that, but you need to make it explicit, and making it implicit sounds a bit sketchy, because technically you shouldn't have control over what other contracts are doing. So, I don't know. I guess that's

[39:00] more or less it. I'll think a bit more about this, but I hope we're more or less on the same page here and should be good to go. Great. I guess that's truly it for today, if there are no more concerns. Thanks, everyone. Yeah, thank you. See you at the next protocol meetings and...

</details>
diff --git a/meetings/2025-05-22.mdx b/meetings/2025-05-22.mdx new file mode 100644 index 0000000000..aaa75e0b55 --- /dev/null +++ b/meetings/2025-05-22.mdx @@ -0,0 +1,108 @@ +--- +title: "OpenZeppelin Monitor" +description: "A walkthrough of OpenZeppelin Monitor for Stellar, explaining how to configure contract monitoring and alerting via Soroban RPC, with examples for tracking function calls and events and sending notifications to common channels." +authors: [carsten-jacobsen] +tags: [spotlight] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This meeting introduces the OpenZeppelin Monitor as an open-source monitoring and alerting tool for Soroban smart contracts. Bram Hoogenkamp walks through the repository, the core configuration files, and how developers can set up monitors without changing their contracts—by watching function calls, events, and transaction outcomes via Soroban RPC. + +The demo shows how to configure alerts for a real contract and deliver notifications to channels like Telegram, Slack, Discord, or email. The conversation also touches on how monitors can pair with upcoming relayer support to enable automated incident-response actions such as pausing contracts. + +### Key Topics + +- What OpenZeppelin Monitor provides: + - Off-chain monitoring for Soroban contracts using Soroban RPC + - Alerting/notification when specified conditions are met + - No contract-side changes required; logic is configured externally +- Repository and setup: + - Rust-based implementation with local binary execution and Docker support + - Example configs and docs to bootstrap new users quickly +- Core configuration components: + - Network config (RPC URL, block confirmations, polling schedule) + - Monitor config (contract address, contract spec, conditions) + - Triggers (Slack/Discord/Telegram/email/webhook/custom script) + - Optional filters to pre-filter blocks before applying monitor logic +- Monitoring capabilities: + - Monitor specific contract function calls (by signature) and apply parameter-based expressions + - Monitor emitted events, including positional parameter filtering where spec support is limited + - Monitor transaction-level outcomes (e.g., trigger only on successful execution) +- Demo flow highlights: + - Configuring a monitor for `swap` calls on a DEX contract + - Replaying a known block to validate configuration and trigger a test alert + - RPC retention considerations mentioned for replaying older blocks +- Automation and incident response direction: + - Custom scripts can forward context to external systems + - Planned integration with OpenZeppelin relayers (Soroban support “coming soon”) to enable actions like pausing contracts after suspicious events + +### Resources + +- [OpenZeppelin Monitor Repository](https://github.com/OpenZeppelin/openzeppelin-monitor) +- [OpenZeppelin Monitor's Documentation](https://docs.openzeppelin.com/monitor) + +
<summary>Video Transcript</summary>

[00:00] Hello and welcome to this week's Stellar Developer Meeting. Today I have a guest from OpenZeppelin: I have Bram. Welcome to the meeting, and please introduce yourself. Hey, thanks, and happy to be here. So yeah, my name is Bram, part of the OpenZeppelin team. I'm working on the product team, and we are currently really focused on our open-source efforts, of which developer tooling is of course one. We are now working closely together with Stellar, and in terms of developer tooling we just released our open-source monitor. So I'm going to share that with you today. Great, super interesting. I don't know a lot about the monitor yet, so I'm looking forward to

[01:00] this demo as well. Awesome. Great. Yeah, I will show you how to set the monitors up today. Can I start? Yeah, you can share your screen. Cool. Nice. I think my screen is already shared, right? Yeah. Here we go. Hopefully everyone can see it. I want to start with the repository, so you know where you can actually find the open-source monitor. If you go to the OpenZeppelin organization and navigate to the openzeppelin-monitor repository, you can find the codebase. What you can then do is clone the repository. We implemented it in Rust, so there's also a requirement to install Rust to build and compile all the code and run

[02:00] it. Over here you see the file structure of the open-source monitor, and there are two or three things I especially want to highlight. The configuration is really important; most of the work you'll do as a developer to set up these monitors is in the configuration, so we'll talk through it. Today I'm also going to walk through the examples: we have configuration examples, and for everyone starting out in this repository I'd recommend having a look at the different examples, because they can really help you get up to speed on how to configure everything. We of course also have docs, but for some people it might be more intuitive to first look at the examples. I will

[03:00] also share the docs at the end of this call. Today we're going to run it locally: we have a binary we can run to spin up the monitors locally, but we also allow dockerized runs. There are Docker files here, so you can create a Docker instance and run it, for example in your cloud environment, or just locally if you prefer. Next, I want to go a little into the configuration and what the monitor is built up from. There are four main components, of which the monitoring configuration is the main one. The monitoring configuration

[04:00] contains all the conditional logic you can apply, for example for smart contract observability, and it also references the filters: when we pull blocks on chain and filter through them, you can apply certain filters before going through the actual conditions within the monitor itself. An example could be that you only want to process even-numbered blocks.
We created an example of this. It might not be a real-world use case, but it showcases what filters can do: before you go through the whole block and apply the monitoring logic defined in the configuration, you can do some pre-filtering, in

[05:00] this case checking whether a block number is even or odd; here we would only process the even-numbered blocks. So those are the filters. Another really important component is the network. We have network definitions, and in the case of Stellar we need to define this network definition over here; there's an example you can use. You define the network type, slug, name, and the RPC. The RPC URL is not prefilled, so you can choose your own. Then there are some other parameters you can set, for example the block confirmations: how many confirmations you wait for before triggering a monitor. If you detect a certain function call on chain, or an event emitted by a certain contract, this lets

[06:00] you set the number of confirmation blocks to wait for before the monitor actually triggers and notifies someone. Then we also have a cron schedule. This is another important parameter: it sets how often we make an RPC call to fetch the latest blocks. In this case I defined it as every minute, so every minute we pull in the latest blocks and process them; but you can set different schedules, for example once every five minutes, or shorter crons. That's the network configuration. Then another really important component is the triggers. If a monitor fires and

[07:00] you want to alert someone about a function call happening within a smart contract, or an event, you define these triggers. We currently support Discord, email, Slack, and Telegram as notification channels, and we also support webhook notifications, so you can trigger an external service through a webhook when the monitor fires. There's also an option for a custom script: if you want to do some post-processing after the monitor triggered, you can define a script with your own custom logic, to process the data or integrate with your own observability platform like Datadog or OpsGenie. We are still looking at supporting those natively, but if you already want to start with that, the

[08:00] custom scripts let you create that custom logic and, for example, post the data to such an observability platform. That's just one example; what you do with it is up to you, and you have a lot of freedom here. Then, of course, the most important configuration file, where everything comes together, is the actual monitoring configuration. You give it a name, set whether the monitor is paused or not (in this case we don't want to pause it), and then you define the network; that's again the network configuration from before.
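As a rough illustration of the network and trigger pieces just described, written as TypeScript object literals for readability; the actual tool is configured through JSON files, and the field names here are assumptions rather than the Monitor's real schema:

```typescript
// Hypothetical Stellar network definition: bring your own RPC, wait two
// block confirmations, and poll for new blocks once per minute.
const network = {
  networkType: "Stellar",
  slug: "stellar-mainnet",
  rpcUrl: "https://your-rpc.example.org",
  confirmationBlocks: 2,
  cronSchedule: "0 */1 * * * *",
};

// Hypothetical Slack trigger; the ${...} placeholders stand in for the
// template variables a notification message can interpolate.
const slackTrigger = {
  type: "slack",
  title: "Monitor fired",
  body: "Matched ${monitor.name} in transaction ${transaction.hash}",
};
```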
You select the network by taking the file name and inserting it here. Then what you do next is the actual implementation of the monitoring logic. In this case, as an example, I took the address of

[09:00] the Aqua DEX, because I want to filter for a swap happening on that DEX. We define the address of the contract here, and we also include the contract spec. In this contract spec I only added the function I want to monitor, but you could also paste the whole file. Underneath, we set the conditions on which to trigger the monitor. We've already set one condition, of course: we filter for a specific contract, which in this case is the DEX. Then you can set conditions around a function call, for example. I don't want to trigger the monitor for every function call or event emitted from this contract; I want to look for the swap function signature. So if someone is

[10:00] interacting with this function within the contract, and (you can also set expressions here) the out_min parameter is higher than this number, then we trigger the monitor. You can also omit the expression and have it trigger on all swaps, but the expression lets you use one of the parameters passed to the function in conditional logic, to filter for, say, a call involving a specific address, or the amount out being higher than a certain threshold. That is one way of filtering, but we also support events, not just functions.

[11:00] Events work similarly to functions; you can also set an expression. There's currently one caveat with event expressions: because events are not yet supported in the contract spec, you have to use positional parameters instead of parameter names. So you use 0, 1, or 2 to pinpoint which parameter of the event you want to build conditional logic around. Then there's a third and last thing we can monitor, at the transaction level. Here you can apply logic such as: if the status is success, meaning the transaction was actually executed, you

[12:00] trigger the monitor; if it failed, you don't. Then the last two parts of the monitoring configuration are trigger conditions and triggers. We already talked a little about these. Under triggers, in this case we use the Slack channel: if these conditions are met, we notify the Slack channel that the monitor was triggered. And in a Slack configuration you can set the title of the monitor trigger and also the message, and within that message you can include parameters, for example the transaction parameters or the event parameters it triggered on. So you can include all of this context in

[13:00] your monitoring message. Then, of course, you can also include other triggers.
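Putting the match conditions from this walkthrough together, again as an illustrative TypeScript object rather than the Monitor's exact JSON schema; the contract address is a placeholder:

```typescript
// Hypothetical monitor definition: fire only on successful `swap` calls to
// one DEX contract where the `out_min` argument exceeds a threshold, and
// route matches to the Slack trigger sketched earlier.
const monitor = {
  name: "aqua-dex-swaps",
  paused: false,
  networks: ["stellar-mainnet"],
  addresses: ["<AQUA_DEX_CONTRACT_ID>"], // placeholder, not a real contract ID
  matchConditions: {
    functions: [{ signature: "swap", expression: "out_min > 1000000" }],
    events: [], // event expressions use positional params: "0", "1", ...
    transactions: [{ status: "Success" }],
  },
  triggers: ["slackTrigger"],
};
```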
As for the other channels, you can just add Telegram as well, or email; I'm not going through all of them, but the configuration for all of these notification channels is pretty straightforward. The last configuration variable I want to talk about is the trigger conditions. This is what I mentioned around scripts: you can run a custom script, the monitor will pass the context to that script, and within it you can do post-processing on the data it receives, pass it to an observability platform, or call a third-party service. That can of

[14:00] course also be done through webhook notifications, but maybe you need to include special API keys or something else that isn't completely compatible with the webhook notifications; custom scripts give you the freedom to create your own flow of logic here. Then I set up an actual example, so let's have a quick look. You can copy this configuration into the config here and try to run it. In my configuration I didn't use any filters, and I defined the Stellar mainnet network; it's the same file I just showed you. In this case I created a Telegram bot using the BotFather; that's a channel you can

[15:00] start a chat with, and it will create a bot on Telegram; some of you might already be familiar with this. You then get a bot ID, which you paste here, along with a unique chat ID. And then my monitor is able to send out alerts to this Telegram channel. As you can see, I already tested it a little, but we're going to do it again. Going to my monitoring configuration: I'm monitoring the Aqua DEX for a specific function call, swap_chained. This function is called when I do a swap on the Aqua DEX, and here I just filter for all function calls with the signature swap_chained. So every time someone does a swap, I

[16:00] want to trigger a Telegram notification saying that a swap was done on the Aqua DEX. This might not be the most practical monitor as a real use case, but I wanted to show it for demo purposes. What you'd normally do is run the monitor locally like this, or via docker compose; it brings the monitors up and starts pulling in all these blocks. But for this demo I'll use another command, which replays a block and applies the monitoring logic to it, so we can see whether it actually works without waiting for an actual swap on the Aqua DEX. If we target this block (I know this

[17:00] contract function is triggered in this block) and run it, it searches through that block and applies the monitoring configuration to it. As you can see, we found one match, and my Telegram already gave me an alert; over here you can see that the monitor was triggered. So this is an example of a monitor you can configure.
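For the custom-script option mentioned above, a hypothetical post-processing script; how the Monitor hands context to scripts (JSON on stdin here) and the ingest endpoint are assumptions for illustration, not the tool's documented contract:

```typescript
// Hypothetical custom script: read the monitor's match context and forward
// it to an external observability endpoint.
import process from "node:process";

async function main(): Promise<void> {
  let raw = "";
  for await (const chunk of process.stdin) raw += chunk; // assumed: JSON on stdin
  const context = JSON.parse(raw);

  await fetch("https://observability.example.org/ingest", {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify(context),
  });
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
```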
As I already said, this might not be the best real-world use case, but you can do a lot of different things. Think about access-control monitors: if you implement access control within your smart contract, you can watch for grant-role functions being called, for example if someone is trying to gain access to certain functions, or watch for a large mint on a token contract. There are

[18:00] a lot of monitoring use cases you can set up to increase the operational security around your smart contracts. One thing I'm also really excited about, which we're going to launch in about one and a half to two months, is Soroban support for the relayers. The relayers are one of the other tools we're currently adding Stellar support to: an externally-owned account, or wallet, that you can interact with through an API or SDK. What we did there is abstract away all the complex transaction management: transaction submission, resubmission, gas pricing, sequence number handling, and of course

[19:00] secure key storage; we store those keys securely in a vault. With the open-source relayers you can self-host them, so you have total control over them, and then very easily interact with them through the SDK or API and submit transactions in a simplified way. What you can then do with the monitors and relayers together is use, for example, the custom scripts I talked about to take the monitoring context and act on it. Say we take the use case of a stablecoin contract where a large mint happens, and we see the mint is not from an address that would usually do the mint, so someone might have gained

[20:00] unauthorized access. We can then trigger a custom script that calls the relayer, which executes, for example, a pause function on the stablecoin contract, to limit the damage that can be done. Incident response, as we'd call it, is one major use case, and there are smaller use cases within that category. But there's also a lot of more generic automation possible. Another example: if you run a DeFi project or a trading company, you can watch different DeFi protocols for certain thresholds being met, and then use the relayers to, for example, rebalance or liquidate certain

[21:00] positions within certain pools. That's the kind of automation that's possible, and it integrates easily using these custom scripts, which you can trigger from your monitoring configuration as I showed. So that's a high-level overview of the monitors; I think I've given you all the context I wanted to give today. If anyone has any questions, feel free to ask. And I also want to quickly show where you can find the docs: if you go to docs.openzeppelin.com,
you'll see a tab that says Open Source Tools. Under the relayers there's no Stellar support yet (coming soon), but for the OpenZeppelin monitor we have

[22:00] a really easy quick start you can follow, with the first few commands that I didn't go through here, and then how to set it up for Stellar. So it's pretty easy to set up, and if you have any questions, feel free to ask; we now also have a community channel on the OpenZeppelin Discord. Feel free to join that channel and ask any questions, and of course we're here now to answer some too. Great, this is super interesting. So I can start monitoring my smart contracts without setting anything up in my smart contracts; all the configuration is done in the config files you showed? Yeah, exactly. Everything is done in those four configuration files, and of course you can make them more complex; I gave some simple examples. But you don't have

[23:00] to implement anything within your contracts. As long as the contract has these function signatures, you can import the contract spec, define the function signature or the event signature you want to monitor, and spin the monitors up. As you can see, I don't know the Aqua DEX team, but I can monitor their contracts just by setting up this monitoring configuration. Great. Actually, there was a question earlier: for the replay-a-block command, does the block have to be within the retention window of the RPC? There might be a limit to this; I'm actually not 100% sure how deep into the blocks we can go. So I would

[24:00] say you might sometimes have to do another test transaction to be able to use that command again. But you probably know better than me what the actual RPC limits are in terms of replay depth. I can see in the chat: the RPC has a 7-day retention. Okay, great. Any other questions? Any questions from anyone listening in? Okay, it doesn't look like it. But I think it's great that you mentioned you have a community, so for anyone interested in trying this out and maybe setting it up for their project: if you have any

[25:00] questions or need any help, it sounds like the community is a good place to go, and you already have examples of how to set it up in the repo. Yeah, feel free to ask, also about potential use cases; that's an interesting discussion too. If you're just starting with monitors, you might wonder what you actually want to set up and what would be valuable, so if anyone wants to discuss that, feel free to join and we can share examples of the use cases we see a lot. Okay, we just got one more question: for the confirmation blocks in the config, what should we set it to? Is it for waiting for finality? Yeah, so we just set it to two, but I'd have to ask one of the developers if there's a perfect

[26:00] number; I'm not 100% sure there's a number that really works well, so I'd have to ask our engineering team. Okay, great. But yeah, thank you for joining; it's super interesting, and I'll go ahead and play with it myself.
I love these kinds of solutions that are really easy to set up and give me a lot of different ways to be notified, because some things I want to monitor are more urgent than others. Some things I'd like a Telegram or Slack message about; others I just want to capture some way and look at whenever I feel like it. So I like all the different notification options you provide, because not everything needs an alert;

[27:00] some things I just want to see what happened, but some things need my attention right away. Just one question. I think you mentioned it with the relayers: is there any way to invoke a smart contract if something is happening? Say something is going on that triggers an event, and I want to pause something when that event happens; is there any way to do that? Yeah, that's where the relayers come in. The relayers give you the tool to actually pause that token contract. By themselves the monitors can't do on-chain transactions; that's where the relayer component comes in. You'd define the custom script I talked about, include a relayer, and then call the relayer to do an on-chain

[28:00] transaction, for example a pause function call on a contract. I think around the end of Q2, so in about a month and a half, we'll have Soroban support. Great. I think we should do another session when that is ready. Yeah, sounds good. Thank you for joining today, and thank you everyone who joined the call. We'll see you next week. Yeah, thanks everyone for listening in. Bye bye.

</details>
diff --git a/meetings/2025-07-10.mdx b/meetings/2025-07-10.mdx new file mode 100644 index 0000000000..2f965611a8 --- /dev/null +++ b/meetings/2025-07-10.mdx @@ -0,0 +1,175 @@ +--- +title: "Fifo's Scaffold Stellar" +description: "An overview and demo of Scaffold Stellar, a developer toolkit that accelerates building Soroban smart contracts and frontends on Stellar using built-in CLI tooling, local networks, and contract explorers." +authors: [carsten-jacobsen] +tags: [spotlight] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This Stellar Developer Meeting features Fifo from Aha Labs presenting **Scaffold Stellar**, a powerful starter framework and CLI plugin designed to dramatically speed up development on Stellar and Soroban. The session walks through initializing a new project, deploying and interacting with contracts on a local network, and using the built-in frontend tooling for rapid iteration. + +Scaffold Stellar targets both newcomers—who want a smooth onboarding experience—and experienced Stellar developers—who want to skip boilerplate and move straight to building. + +### Key Topics + +- What Scaffold Stellar is: + - A CLI plugin for Stellar CLI plus a full starter repository + - Includes Soroban smart contracts, frontend, tooling, and plugins + - Designed to bootstrap projects quickly with best practices baked in +- Installation and setup: + - Install Stellar CLI and the Scaffold Stellar plugin + - Initialize a new project with `stellar scaffold init` + - Built to work seamlessly with **Quickstart** for local development +- Project structure: + - `contracts/` — Rust-based Soroban contracts (workspace-style) + - `packages/` — auto-generated TypeScript clients for contracts + - `frontend/` — React + TypeScript app pre-wired to contracts + - `debug/` — contract explorer UI for invoking and inspecting contracts +- Environment configuration: + - Central `environments` file defines RPCs, networks, accounts, and contracts + - Supports multiple environments (development, staging, production) + - Automatic account creation and funding for local development +- Build and watch workflow: + - `stellar scaffold build` builds, deploys, initializes contracts, and generates clients + - `stellar scaffold watch` rebuilds and redeploys on contract changes + - Frontend auto-refreshes as contracts or clients change +- Built-in contract explorer: + - Invoke, simulate, and submit contract calls directly from the UI + - Strong typing and validation based on contract specs + - View results as XDR or JSON + - Inspect contract metadata embedded in WASM +- Metadata and documentation: + - Supports contract metadata via `Cargo.toml` and macros + - Rust doc comments surface directly in the frontend explorer +- Generating new contracts: + - Generate from OpenZeppelin examples or via the OpenZeppelin wizard + - Easily add new contracts to the workspace and environment config +- Wallet integration: + - Uses Stellar Wallet Kit (e.g., Freighter) + - Environment checks ensure wallet network matches app network +- Developer experience highlights: + - No need for separate tools like Stellar Lab during local dev + - Fast feedback loop for contract changes and frontend integration + - Ideal for hackathons and rapid prototyping + +### Resources + +- [Scaffold Stellar Repository](https://github.com/theahaco/scaffold-stellar) +- [Stellar CLI](/docs/tools/cli) +- [Stellar Quickstart](/docs/tools/quickstart) + +Scaffold Stellar demonstrates how a thoughtfully integrated toolchain can remove friction from 
Soroban development—helping developers focus on ideas and logic rather than setup and wiring. + +
<summary>Video Transcript</summary>

[00:00] Hello and welcome, everyone, to this week's Stellar Developer Meeting. Today in the studio I have Fifo from Aha Labs, and he's going to give a super exciting demo of the current progress of Scaffold Stellar. Please introduce yourself, and give us a small introduction to what Scaffold Stellar is, for those who haven't played around with it yet or haven't heard much about it. Awesome, yeah, thanks so much, Carsten. Hey guys, I'm Fifo, I work at Aha Labs as a principal engineer, and I'll be presenting Scaffold Stellar. For those that don't know about it: it includes a CLI and a base project with which you can start developing on Stellar and Soroban in a very easy way. You can think of it as an all-in-one repo with a lot of very

[01:00] helpful tooling around it for you to build your contracts, deploy, test, and have your own environments configured the way they best suit your application, so you can really move fast in the initial process and the whole development cycle. I'm very excited to share this with you. Yeah, and when I heard about the project for the first time, I thought: if you're new to Stellar, this is a great way to get started; it has a lot of cool plugins. But as I dug deeper into it, even as a more experienced developer I actually see a lot of benefits from using this to bootstrap an application, if I want to try something new or build something without spending a lot of time on setting up my environment and doing basic stuff. Some of the OpenZeppelin integrations especially are super exciting as well. The more I look into it, the more I think this is not only cool for new developers but also for more experienced developers who just

[02:00] want to get going and build something without spending a lot of time on environment setup. Exactly, I think you said it all. What's great about it is that it helps not just new developers coming to the ecosystem, who might hit those common initial hurdles when starting projects, but also seasoned developers who know the ecosystem, because you can really customize things to work the way you're most comfortable with and try things really fast. I love it; I've been working on Soroban for some time, and I've missed this kind of tool in the past. Yeah, and that's one of the things I really like about it: all of you building this are very experienced on the Stellar platform, and you've made things very customizable, so you can set it up and customize your settings and the features you want to include.

[03:00] It's nice to see that more experienced developer approach as well, while you don't have to set up a lot of stuff if you just want to get started and play with it out of the box. But let's get into it and see what you've been working on. Awesome. Just double-checking: can you see my screen? Is everything okay? Awesome. Sometimes people complain that my monitor has a big resolution; if things are too small, just let me know and I can zoom in a little further. I'll start with the basics. Here you're seeing the link to the Stellar CLI, and I'll take a few steps back and start from the very beginning.
If you're a new developer coming to the ecosystem and you haven't heard of it yet: the Stellar CLI is a great tool everyone should have, and it's the starting point for what we're doing here. I recommend you get the CLI installed, and we'll start with it. The Stellar CLI has a lot of exciting features: you can manage accounts, you can spin up

[04:00] your own container with a local network, and a bunch of other really cool stuff. But the nice thing that came in recently, which a lot of people might not have noticed in past releases, is that we now have plugins for the CLI. If you have the CLI installed, you can run `stellar plugin search` to search for plugins that are published in the ecosystem and can be installed into the CLI. The idea is that different projects might have specific needs or look for more personalized tooling for what they're building, and this way you can really customize your experience. What you're seeing here is that it went through repositories that have a specific tag it looks for; let me zoom in a little, it's hard to see. Is it better like that? Yeah, now it's really easy to see.

[05:00] Awesome. So basically it found four hits; it looks on GitHub for a specific tag indicating "this is a plugin for the CLI", and we can already see a bunch of interesting stuff. There's scaffold-stellar, in the Aha Labs scaffold-stellar repository; there are the contract bindings; there's the OpenZeppelin upgrader CLI for contracts; and we should see a lot more exciting stuff coming next. For this demo we'll go with scaffold-stellar. I already have it installed; you can go to the scaffold-stellar repository on GitHub and follow the instructions there. It's very simple, just a cargo install and you have the plugin; I have v0.0.5 here. We also have the registry, which is something really exciting, but I won't focus on it in this demo for time reasons; I think we can have follow-up sessions to go deeper into some of the

[06:00] features that won't be covered here. I'll focus on initializing the project and playing around with it. Once you have the plugin installed, you can access it through the Stellar CLI directly. I'll run `stellar scaffold` and hit `-h` to see the help, and you get a bunch of new commands with this plugin. `init` allows you to start a new project from scratch, and it comes with a pretty exciting model repo in which you can manage your contracts, build them, generate the clients, and have a frontend that's ready to interact and integrate with these clients; it's really neat. You have a `build` command that not only builds the contracts but also runs certain routines, which I'll show later, based on the configuration in your environment. You also have the `generate` command, which allows you to generate new contracts.
These let you go through the OpenZeppelin examples, and it can send you to the OpenZeppelin wizard,

[07:00] and there's a bunch of new stuff coming in the future as well. You can `upgrade` existing repositories, turning your workspace into a scaffold project; there's `update`, just to update the environments in your workspace; and also `watch`, which I'll be using a lot in this demo; it builds everything and keeps watching for changes on both sides, the contracts and the frontend. So let's go ahead and start a new project. I'll run `stellar scaffold init` and call it demo10; I think I have a bunch of demos in this folder already, because I've been playing with this over the past few days. You can see it downloaded the repository from the scaffold-stellar frontend, so if you want to have a look at it, or even submit PRs if you find anything to improve, feel free to do that. I'll cd into the new

[08:00] folder and open it with VS Code. Let me position it here and increase the zoom so you can see what's going on. All right, starting in this new repo: what do I have here, and what is the structure? On top you have a folder for contracts, and this is where your contracts are going to live. They follow a format with a directory for each contract, containing its Cargo.toml and source. Out of the box you can see we have three examples. A disclaimer: everything here is a work in progress; this is very recent, so if you see any bugs or anything that could be improved, feel free to share your feedback. We appreciate it, and there's definitely a lot we'll be iterating on and improving over the following releases. Under contracts

[09:00] I have a fungible token from the demos, a hello-world example, and the NFT enumerable, which is a different example contract. So contracts is where your Rust code lives; it's all one big workspace and you can see the source code directly. If you scroll a little, you see `packages`. Right now this is empty; so what is this packages folder? The idea is that as we build the contracts, we automatically go through your configuration, deploy the contracts, and generate the TypeScript bindings. We create clients for the contracts in TypeScript and put them in this folder; all the generated clients end up in packages, and that's where they're accessed from. Below that, we also have a frontend. This is React with TypeScript and Vite, and it's a very bare-bones, simple frontend to start

[10:00] playing with your contracts. But it also includes some interesting things out of the box: it has some providers and some hooks you can use in your application, and you can see the actual implementations we have in place, so you can play around with them. You also have the contracts folder, which for now is empty.
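As a minimal sketch of how one of those generated TypeScript clients might be used from the frontend; the package, field, and method names depend on your contract and setup, so treat all of them, including the local RPC URL, as placeholders:

```typescript
// Hypothetical usage of a client generated into packages/ for a
// hello-world contract: the generated bindings expose a Client class
// configured with a contract id, network passphrase, and RPC URL.
import * as HelloWorld from "hello_world";

async function greet(): Promise<void> {
  const client = new HelloWorld.Client({
    contractId: "<DEPLOYED_CONTRACT_ID>", // filled in by the build step
    networkPassphrase: "Standalone Network ; February 2017", // local network
    rpcUrl: "http://localhost:8000/rpc", // placeholder local quickstart RPC
  });

  const tx = await client.hello({ to: "world" });
  console.log(tx.result); // simulated result of the invocation
}

greet();
```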
The contracts folder is only empty until the first build: as soon as I start building the contracts, you'll see it populated with the clients for each contract. And you have the `debug` folder, which is an interesting contract explorer, very similar to what we have in Stellar Lab; I'll get into more detail in a second. For now it's all bare bones. So what can we start doing? First step: let me open the README. It suggests starting by copying the example env file, so I'll copy it, and now we have the `.env`. This basically tells us which environment we're using: where our frontend is going to look

[11:00] when connecting to our RPC and our Horizon. Oh, and I didn't mention, let me show this, because I'm so used to having it up and running: I'm already running my local Quickstart image. If you've never heard of it, basically through the Stellar CLI you can run `stellar container start`, and it starts a local Docker container with an image that has everything you need to run Stellar locally. You have your own node running locally with pristine history, your own Horizon, your own Friendbot. It's really handy; right now I'll be using it to connect and deploy everything locally in my environment. And just as an example of the commands from the scaffold CLI, to

[12:00] show you the relationship between contracts and packages, I'll go ahead and run a build. If I run `stellar scaffold build` with help, you can see all the options: you can build for a specific environment, and it looks at an env setting that points to which environment you're building your contracts for; I have mine set to development. So you can build your contracts for different environments. The build also looks into this file over here, the environments file, which is very important for a scaffold project, because it defines how you handle each environment. I suggest you have a look and play around; there are a ton of comments to guide you through the configuration. In short, you have three main environments. Here we have a development

[13:00] environment with the configuration for the RPC we'll be using, the passphrase, and a setting to run automatically: when that flag is set and I run `npm run dev`, it automatically starts the container for me, so I don't have to, or in case I forget (I do this all the time when working on my projects). You also have some configuration around the accounts, which is also very neat; I'll get into more detail on that. The important thing is that there are other environments too: you have the staging environment, which you'd normally have pointing to testnet or futurenet, and then production pointing to mainnet. I won't make any changes right now. Just to show you: if I run `stellar scaffold build` with the flag to build clients, there you go, it's starting to run. You can see it's doing a lot of

[14:00] stuff; let me break it down. Basically it's going through all my contracts and building each one of them.
As it builds the contracts, it also looks at the environments file to see what I have set up. First thing you'll notice: there's this `development.accounts` section, which defines which accounts are going to be used; by default it has an account called `me` with `default = true`. What does that mean? If you're familiar with the Stellar CLI, you have `stellar keys`, which lets you manage key pairs directly from the CLI; if I just hit `ls` here, you can see the keys I have. You can create new key pairs, initialize the accounts, and use them to run transactions directly through the CLI. The nice thing here is that when you set these accounts, the build automatically checks whether I have an account called `me` or not,

[15:00] and in case I don't, it creates and initializes that account for me. With the `default = true` flag, it uses this account as the source of the deployment transactions. So, for example, if you're spinning up a bunch of contracts that share the same admin, you could set this up as the admin account and have it deploy all the contracts. Below that you have `development.contracts`, which specifies which contracts the build command should build for this environment. It comes preconfigured with the example contracts: there's hello-world, with an inline syntax saying I want to generate the client for it; below that the fungible token, which also generates a client, but you can see it has something more: you can set the constructor args directly in this format, and you can use the accounts

[16:00] specified above as arguments. So here it's already invoking the constructor for my fungible token, setting the owner to the `me` account and an initial supply, and the same for the NFT contract, setting the owner. And you can play around with this further, because you can add post-initialization commands: there's an `after_deploy` section where you can chain commands to run after everything is deployed, if you want a specific setup. Say that after deploying and initializing a token contract you already want, out of the box, to mint some units and send some tokens around: you can do that very easily. So, a lot ran here; I jumped a few steps. We now have the contracts and the packages, and you can see those were generated based on the contracts specified here, all of the TypeScript

[17:00] packages, and this also filled in the contracts for my frontend, so we can start playing with it. All right, back to the repository. I just initialized this project; what are the steps for a fresh project? We just copied the `.env`. Next, we need to install the dependencies, because there are a few we'll need, for example for the concurrency, and so on: `npm i` runs the installation. Let's wait a little bit. Okay, everything's installed. What's the next step? `npm run dev`. As you run `npm run dev`, you see a lot of things going on here.
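Pulling the environment settings just described into one place, shown as a TypeScript object purely for illustration; the actual scaffold environments file is a config file whose keys and syntax may differ:

```typescript
// Illustrative shape of a development environment: a local RPC, an
// auto-created-and-funded default account, and per-contract client
// generation with constructor arguments and post-deploy hooks.
const development = {
  network: {
    rpcUrl: "http://localhost:8000/rpc", // placeholder local quickstart RPC
    networkPassphrase: "Standalone Network ; February 2017",
    runLocally: true, // start the quickstart container on `npm run dev`
  },
  accounts: [{ name: "me", default: true }], // created and funded if missing
  contracts: {
    hello_world: { client: true },
    fungible_token: {
      client: true,
      constructorArgs: { owner: "me", initial_supply: 1_000_000 },
      afterDeploy: ["mint --to me --amount 100"], // hypothetical chained command
    },
  },
};
```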
And this is where I think the magic lies. So, run that, and a lot of stuff is running; I'll just go back and show you as it scrolls down. You see that out of

[18:00] the box it starts two commands concurrently: `stellar scaffold watch` with the build-clients flag, and also Vite. These two run in separate processes: you can see Vite running here on number one, and on number zero you can see the build commands I showed before. So I'll go ahead and click here to open the front end. It's already there; let me just see if it finished building everything we had already built. It basically just repeated everything, but there we go: we have our front end. All right, it's a very simple one. It starts with some instructions telling you what you can do and how. Let me break down what you're seeing in the interface. First thing you notice: it came automatically connected to my account. Why is that, Fifo? Well, that's mainly because I have been using this already, and we have some account management logic here that stores in local storage the account

[19:00] that was last connected. So when you come back, it identifies that it has been connected before and just reuses that one. But I can come up here and switch accounts, or click and disconnect, if you want to see the fresh start the first time you access it. What's important to note: I'm using Freighter, and if you're using Freighter you should set it to a custom network, because I'm using the local one. If you've never done that, you go to Settings, navigate to Network, and you can add a custom network there. If you're running on testnet or mainnet you're good to go, because you can simply select the network you're on; but for a custom one, you just need to input the same parameters we have in the env: your RPC URL, the passphrase, and allow HTTP connections. There is something in my local environment here that I'm still trying to figure out: sometimes it

[20:00] struggles with Freighter and doesn't show the proper balance. I think it's something related specifically to this environment I'm running, but it's going to work, everything is fine, as I'll show you. So if I click on connect, you see that we're using the Stellar Wallet Kit. This is a nice library we have in the ecosystem which connects to several different wallets; you make just one integration and you're good to go. I have these two wallets installed, but it supports many more. I'll just go with Freighter. And wait a second, did I click it? Oh, something's going on. What happened? I think my computer is struggling, because everything just slowed down. Let me double check if there's an issue here: Freighter is not available. What is going on? Did my plugin just die?

[21:00] Yeah, I should definitely open a ticket with Freighter. Let me try to kill everything and start from scratch. Oh no, I can't do that, otherwise I'll drop out of the session. Let me try it again. Is there a way I can maybe restart the extension? Can I just do this and bring it back? Let me see.
Oh, there you go, it's prompting me for my password. Is this correct? There you go. Okay, I think it's working now; not sure what happened. I'll definitely open a ticket, and thanks to the demo gods for blessing us. Okay, so I connected my Freighter wallet. What you see here: it automatically brings in the balance, and we have this small indicator at the

[22:00] top which checks which environment you have set locally and indicates whether you're connected to it. If I'm connected to testnet in the wallet, for example, it shows a different message and indicator, just so you know you're not in the intended environment, because we want these front ends to be built for a specific environment. So here I'm connected with my Fifo wallet, and there you go. What can this front end do for me? What can I use it for? The nice part starts once we click on this button at the top. If you're familiar with Stellar Lab, the laboratory, it has a feature, I think it's also called contract explorer, with the invoke contract tab. Basically, we ported a good part of that implementation over here, and a big shout out to the Stellar Lab team who helped us through the process. What you're seeing is that as I

[23:00] open my front end, it automatically goes through my packages and the contracts I have here: it loads the Wasms, pulls in the whole spec for each client, and shows me this interface, which lets me interact directly with the contracts in my local environment. So what you're seeing are the three contracts I have. I can simply click to switch which contract I'm interacting with, and I can start invoking functions from here. Let me go through some examples, starting with the hello world. If we check the hello world contract, it has this hello function which receives a `to` argument and says hello to it. So I'll just say "fifo", and since I have my account connected, I can both simulate and submit; I'll break those down. I'll start with simulate. If I click simulate, it's

[24:00] basically running a simulation of the transaction, and you see some things already changed. So what changed here? First, it is using my connected account as the source account of the transaction, and it simulates an invocation with the parameters I'm providing. You can see on top that it shows a flag indicating it is a read invocation. That means that if I submitted this invocation to the network and it was processed, there would be no change in state; we're not changing any state of the contract. So there's no real reason to actually submit and pay fees: if we just want to fetch some value or see a result, we can simulate and look at the result, which is what we have down below. Here we have the response and the result of the simulation. It is successful, and you can click on show details to expand the whole view and see the entire response. If you scroll down, and if you're familiar with this, you see we have this results object, and here we can see the return value: it's saying "hello fifo"

[25:00] with the parameter that I used. If I say something different, like "john", I'll simulate again and show the details.
Go down below, and there you go: hello john. Awesome. What else can we do? Let's try something different: let's switch here, write a new function, and change things around. Let's store a value; I think things start to get interesting once you start playing with your contracts and changing them. So I'll add a store function: let's say it receives an address, returns nothing, and just stores the address in storage. Let me write it out... there you go, Copilot. Let me double check: set... no, this is wrong.

[26:00] So, storage, instance; let's use instance storage for now. Instance, set. Okay, let's use a simple key, a `symbol_short`; I think I have to import `symbol_short`. Let's just call the key "my_key" for now, and the value will be the address we're sending. Okay, let me save. Where am I? Oh, this should be... there you go. Let me save again; I think this is good to go. Let's wait a little bit. You see that as soon as I save, everything runs again: it's watching for changes in real time, and as it identifies that I changed the contract, it builds, notices the difference, and runs the whole routine: it redeploys and reinitializes. This is very powerful, because it means that as I'm

[27:00] making changes, it identifies when a change is relevant, redeploys, and regenerates everything. And if you paid attention, you should have seen that the front end already updated here on the side: I already have the new store function showing up. That's really neat, because it means that as I make changes, I can immediately invoke them from the front end. So the store function here lets me send an address. I'll copy my own address from here (we should definitely add a copy button), paste it, and simulate. I see that I can send it, and I can submit it. But if you notice the badge on top, it's slightly different now: it indicates a write invocation. This means that, based on this simulation, if I submit, I'll be changing state, storing a value in the contract. So I'll click submit, and it prompts me to sign. This is just because of the issue I mentioned with my environment: it sometimes doesn't know I have balance. But there you go, it was executed, and here it

[28:00] is. Oh, and 1Password keeps popping up. What you're seeing here is the result of the submission, and it's slightly different: it has the same compact version, but I can click on details and expand it. Let me just deal with 1Password first. There you go. In the summary you have the hash of the transaction, the ledger number for when it was processed, and the transaction envelope in XDR; you can also click here to quickly switch to JSON and view the JSON version of the object. The same for the result: you can convert it to JSON, and so on. This is really nice. Oh, did it just reopen? That's awful; it's because of this one. How can I disable it? Let me go to the extensions real quick and disable this guy, so it doesn't pop up in

[29:00] our face. Okay, I was talking about the submission. So, I just submitted and made a change.
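For reference, the store function sketched live in the editor would look something like the snippet below; the function and key names are illustrative rather than the exact ones typed in the demo:

```rust
use soroban_sdk::{contract, contractimpl, symbol_short, Address, Env};

#[contract]
pub struct HelloContract;

#[contractimpl]
impl HelloContract {
    /// Stores the given address in instance storage under a fixed key.
    /// This is a write invocation: submitting it changes contract state.
    pub fn store_val(env: Env, value: Address) {
        env.storage().instance().set(&symbol_short!("my_key"), &value);
    }
}
```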
What else is important to highlight as you play with these contracts: you may have noticed already that all of the functions are loaded here with their specific arguments and the specific types those arguments have. For example, looking at hello, it expects an ScString, so it only accepts strings, and as I type, it validates the input I'm providing. The store function expects an address: if I put in anything other than an address, you'll see it validate and tell me, hey, this is not a public key or a contract ID; it needs to be a valid address. You should see this for all of your functions and all of the known types. If you go over to the fungible token, for example, you see all of these types indicated along with the fields; we have i128 and so on. And for functions

[30:00] that don't expect arguments, you just see a compact version, so you can simply check and simulate: hey, is this paused? It's not paused; it returns false. Awesome, this is really useful. Okay, what else can we see here? Let me take a step back and check my notes to make sure I'm not forgetting anything. Oh, yes: at the top we have the name of the contract and the contract ID; you can copy it if you're making direct invocations from the CLI. But you can also click on show details, and that's very interesting. What you're seeing there is additional data about this contract. You have the contract Wasm hash, in case you want to work with the deployed Wasm version. You also have the contract metadata; if you're not familiar with contract metadata, you can click through to the documentation and read a little more. In essence, these

[31:00] are pieces of data embedded into the Wasm during compilation, some of them automatically; but you can also add your own custom metadata, depending on what you want to ship with the deployed version of the contract. Basically, what we're doing in this front end is that as we go through our clients and identify the contracts we have clients for, we also fetch their Wasm, get the Wasm hash, and decode the Wasm to extract this metadata automatically. So if you pointed this at a contract deployed on testnet, for example, it would also work and pull all of the metadata for that contract. Let's make some changes and add some metadata to this contract. Going into the hello world contract, it has just the basic metadata indicating the version, the SDK version, the protocol version, and the release. There are a few ways in which

[32:00] you can do this. One that is very simple is directly through the Cargo.toml file. I'll copy this over to the Cargo.toml and add a section for `package.metadata.stellar`. We have some different properties here: one indicating this is a contract, some custom ones like a custom string "foo" and a custom bool set to true, and then the authors, the repository, and the homepage; all of these get baked into the contract during the build.
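The Cargo.toml route shown here might look something like the following sketch; the table name comes straight from the demo, but the individual keys are illustrative:

```toml
# Hypothetical sketch of contract metadata declared in Cargo.toml.
# Each entry is baked into the compiled Wasm at build time.
[package.metadata.stellar]
contract = true
my_custom_string = "foo"
my_custom_bool = true
authors = "dev@example.com"
repository = "https://github.com/example/project"
homepage = "https://example.com"
```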
Beyond this, and this is something you can do in your own contracts, you can also use the `contractmeta!` macro; let me just make sure it's imported. For `contractmeta!`, all you need to do is provide a key, which I'll just call "my_key", that's more than enough, and a value; for the value I'll

[33:00] just say "fifo". There we go. Let me save; it will rerun everything, so let's wait a little bit. Everything here gets included in the Wasm during compilation, and once we load it in the front end, we decode it from the Wasm and present it. I think everything has run; let me check, and there you go. You see a bunch of new entries popping up now: all the keys I included through the Cargo.toml, plus the key I added here. My key, "fifo", here at the top, is of type meta entry; we have the other traditional ones; we have the authors that came from the Cargo.toml; we have contract true, the home domain, and so on. That's very powerful, and if you want to reuse this in your project, you can just grab these components and reuse them in the front-end app. In the same spirit, something we were conscious about is that all of this contract explorer

[34:00] capability is not something you want to ship to your production environment. So everything related to this page is tightly organized within the debug directory: you can simply delete the debug directory and you'll have a clean project. And if you want to reuse some bits, you can pick out the components and reuse them in your own app. Moving forward: okay, we made some changes and I updated the metadata. What else can we do? Remember I was telling you about the environments? Let me find the environments file, which is the file that lets us configure the environment we're working on. I'll have a look at the NFT entry. For the NFT one, all it says is that we want

[35:00] to build clients for this contract, and that we're constructing it with the "me" account as the owner. So what if we also want to, say, mint some units of it? Let me grab this, the example we have here: I'm basically going to add an `after_deploy` hook, where we can chain different commands to run after the deploy. But let me check the actual syntax for this contract, the NFT enumerable contract. The constructor... okay, it's an owner, an address. Sorry, not the constructor, the mint: it takes a `to`, an address. There we go. Back in the environments file: mint, to, and then an address; I'll just delete this and give it the name of the account, and

[36:00] there you go. As I make new deployments, it should do it. Now, even though it's running, I didn't change anything in the contract, so it's not going to deploy a new Wasm for me. So let me make a change to this contract. One thing we can take advantage of with this update, and I already highlighted it: we can also add Rust docs here as comments.
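In source, the `contractmeta!` macro just demonstrated, together with the doc comments added in the next step, might look like this minimal sketch (the mint body is elided; the actual demo uses the OpenZeppelin example implementation):

```rust
use soroban_sdk::{contract, contractimpl, contractmeta, Address, Env};

// Embeds a custom key/value entry into the Wasm's metadata section,
// alongside the entries generated from Cargo.toml.
contractmeta!(key = "my_key", val = "fifo");

#[contract]
pub struct NftContract;

#[contractimpl]
impl NftContract {
    /// Mints a new token to the specified address.
    ///
    /// Doc comments like this are compiled into the contract spec, so the
    /// scaffold's debug front end can display them next to the function.
    pub fn mint(_env: Env, _to: Address) {
        // Implementation elided; see the OpenZeppelin NFT example.
    }
}
```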
So, for example, we can say that this... no, it doesn't return the owner of the contract; no, Copilot. This one: "mints a new token to the specified address; caller must be..." and so on. Okay, this is good enough; let me leave these two lines. Once we have these docs on top of the functions and we save, they get included in the Wasm, in the spec of the contract. So when we load the front end, we detect them and show the docs alongside the functions. Let's wait a second.

[37:00] It's running everything. You'll sometimes see this blinking a little; it means there's a lot going on, and Vite is watching for these changes and updating everything. Let me double check; I think everything executed. It's the enumerable one, let me see. I'll have to look into why it didn't update there automatically, but there it is. It rendered in two lines, and it's very small; it should expand a little further, so there's plenty to note down as improvements. But basically it brings the Rust doc in here, with some formatting, and it always shows up with the function it's attached to. So, okay, I added the step to mint and send to the address. What is the name of the account? "me". Let me get this address. What is the key? `stellar keys public-key

[38:00] me`... no, I added a "k", a typo again. But it worked: this is the "me" account. Let me check if it has a balance: simulate, and there you go, a successful read. Let me check the balance... here it is, the return value: it has a balance of one NFT. And I think we have another function here, `owner_of`, taking the token ID. We know this is a sequential token, so token number zero is the first one. Yes: I can simulate and see that the owner is the address of my admin. Some other things you can do that are very helpful: I used this key, but sometimes I don't have these accounts in my wallets. You can use `stellar keys` to grab the private keys and import them into your wallet, if you want to run some

[39:00] functions from the admin perspective, for example. So: `stellar keys secret me`, if I'm not mistaken. There you go, that's the secret of the "me" account. Over in Freighter, you click on the icon at the top, you see your accounts, and you can click on import secret key; I just paste it in. And remember: if you're seeing this secret key exposed in the recording, you shouldn't use it for anything real, because it has been exposed. Let me put in my password, don't forget to tick the checkbox, and import. There you go, and yes, that's the correct account. All right, I'm connected with my admin now. So if I want to make transactions or mints from the owner's perspective, I can. For example, let me grab the address of a different account, my Fifo account. I'll come here,

[40:00] copy it, and switch back to my admin, which is this one. And let me mint a token to my Fifo account. There we go. One important thing to notice: I can click submit right away; you don't have to simulate manually. You can always simulate first to see what the outcome would be, whether it is likely to fail or not.
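As an aside, the key-management commands used in this segment, roughly as spoken in the demo (subcommand names may vary slightly between CLI versions):

```sh
stellar keys ls             # list the identities managed by the CLI
stellar keys public-key me  # print the public key of the "me" identity
stellar keys secret me      # print its secret seed -- never expose this for real funds
```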
But you can also just come here and click submit directly, and it runs the whole workflow: it assembles the transaction, simulates it, checks the result, prepares the transaction, signs, and then submits. So I'll click submit. You'll be prompted to review and sign; approve and continue. Yes, that's correct: sign transaction. And there you go, it was successfully executed. So if I grab my Fifo account now, check the balance, and simulate, I should see that it holds a token. Oops, too

[41:00] much scrolling. There you go: return value, one; it's holding one NFT. And if I check the owner of number one... let me see, show details: who owns NFT number one? There you go, the Fifo account. Awesome. Okay, Fifo, but you're just showing the examples we ship; you've only made small tweaks. What else can we do? What else is in there for us? Another nice thing I want to show today is generate. Let me stop this for a moment and close this one so you can see. Let me run `stellar scaffold` again and show the commands. These are the commands we have; I want to show you the generate one, which is for generating new contracts in this repository. So, `stellar scaffold generate`; let me see the options. Basically,

[42:00] I can generate contracts. So, `generate contract --help`, and there you go. Among the options, I can clone one of the examples from the OpenZeppelin repository. If you're not familiar, let me pull it up: this is the repository, it's maintained by OpenZeppelin, and they have some very nice examples there that you can look at. You can choose any one of those and clone it directly into the repository. You also have the option to list the existing ones, which shows the list of examples, and you can go through the wizard. Let me show you: if you hit contract from-wizard, it pops up the OpenZeppelin wizard. This is a really nice wizard that lets you combine different features: I want a token that is mintable, burnable, and pausable, and it adds all of these

[43:00] characteristics and all the markers, and you can grab it and bring it into your project. But what I'll do here is use the other option: `contract ls`. Oh sorry, it's `ls`, and here I can see all the available contracts from OpenZeppelin. I'll pick one; let's use the fungible capped one, I think that's a good one. Let me check, it's "from", okay. So: `stellar scaffold generate contract from`, and then the name... how do you write it? Fungible capped. Is that correct? I hope so. Yes, let me see. Yes: we have a new contract here; the fungible capped contract was imported into our repository.
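The generate subcommands exercised here, as a quick reference; names are transcribed from the demo and may evolve with the CLI:

```sh
stellar scaffold generate contract --help        # show the generation options
stellar scaffold generate contract ls            # list the available OpenZeppelin examples
stellar scaffold generate contract from-wizard   # open the OpenZeppelin contract wizard
stellar scaffold generate contract from <name>   # clone a named example into the repo
```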
[44:00] Now let me add it to our configuration so it can be built and a client generated, and so on. Let me have a look: does it have a constructor? Yes, it has a constructor, and it expects a cap integer. So let's go to our environments file. I'll add a new contract using the same syntax we used for the fungible token; I'll copy that entry and put it at the end. So I have a new one, the fungible capped one. It's important that it matches the name; let me see how it's set in the Cargo.toml. There you go: it's the fungible capped example, but it needs to match the final file generated, so it should have the underscores.

[45:00] There we go: client true. We also want to invoke the constructor right afterwards; it accepts a cap argument with an integer, so I'll just set the cap to 10,000, an arbitrary number, and save. Let me see... oh sorry, I had stopped the command, so: `npm run dev`. Let's see if it picks up the changes right away and runs. It's running everything; let me go back and check. There we go, but I think it didn't deploy it yet. Let's wait a little; is it running? No, I think it stopped; I should have started the command before I made the update. Let's make a small change so it detects one; I'll add some comments here, maybe on this one. Oh no, it's still running. Sorry, there you go,

[46:00] still running; everything updated. It blinks for a while; don't worry, this happens sometimes as we remove and add packages. Let me double check whether this is because we added a new package that wasn't tracked. I know there's an open issue the team was working on today around how we track newly generated packages: sometimes we add a new package and it isn't picked up in the workspace. In that case, we just run `npm i` at the root level and it gets picked up; this is a quick workaround, and I know they're patching it even as we present. And there you go, that was it. It's loading everything, and we see a new one here: the fungible capped example. Let me double check: does it have an admin? No, it doesn't, which means I can mint with any account. Let me check the total supply before we start invoking this

[47:00] contract, and the total supply is indeed zero: there are no units in existence. So let's mint some. I'll mint directly to our main account; open here, copy. Let's use an amount under the cap; this one should be fine because it's under the cap. Loading... there we go, executed and submitted. If I take this account and check the balance, simulate, show details, and scroll down here: 5,000. The balance is there; it was minted. Now let's try to mint 5,001 more. This shouldn't go through: whether I click simulate or submit, it fails, because the simulation fails: we're hitting the cap. We should be

[48:00] able to see the details of the failed simulation here and what happened. So, as you see, we just added a new contract and started playing around, and this gives us a lot of speed, because you can start invoking everything from the client and see the changes right away. I think this is very powerful, and it should be very helpful for new developers getting up to speed and seeing how the implementation manages all these workflows; you should also see improvements coming soon as we gather feedback and add more functionality. So, I'll pause for a second and take a sip of water to lubricate my vocal cords. Carsten, I'm not sure if we have any questions.
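Consolidating the environments.toml edits from this segment, the new entry might look roughly like the sketch below; the schema is approximated from the walkthrough, and the generated file's comments document the exact shape:

```toml
# Hypothetical sketch of the capped-token entry added in the demo.
[development.contracts.fungible_capped_example]
client = true
# The constructor takes a cap; 10,000 is the arbitrary value used here.
constructor_args = "--cap 10000"

# The after_deploy hook added earlier for the NFT contract, for comparison:
# it chains commands to run once deploy and init have finished.
[development.contracts.nft_enumerable_example]
client = true
constructor_args = "--owner me"
after_deploy = "mint --to me"
```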
If anyone would like to jump in, feel free. I think we're pretty much caught up on questions, but if anyone has questions, please ask. I think this is a really great demo that shows how much progress you've made on this. I love that the tooling is built in; it makes it super easy to debug, develop,

[49:00] and add on to your smart contracts. You don't need to have a ton of tooling set up; it's all built in, which makes it very easy. I downloaded and ran the latest public version last night, and it was super easy to get up and running, as soon as I figured out that I needed to have Docker running; mine was paused. After that, everything was pretty smooth. And there was a comment about Stellar Lab: you can actually connect Stellar Lab to quickstart. That works: even if you have quickstart and this running on your local machine, you can still use the web-based Stellar Lab. But I think most of the functionality I would be interested in, you've already exposed here, like the

[50:00] smart contracts: you can invoke them and see the output. So most of the functionality I use in Stellar Lab is already included in this version. Amazing job. Awesome, it's great to hear that, and I invite everyone to play around, test, and share feedback, especially people participating in the hackathons; this is a great starter for hackathons, and they can really get up to speed. Yeah, I think for HackMeridian we definitely need to do a big push to get people going on this. When we do hackathons, we always see some people struggle a bit with setting up their environment and getting everything installed; sometimes they spend an hour getting things up and running instead of starting to code or deciding what to build. So this is really a fast track to getting started on your project. Yeah, and we have some hidden gems that I didn't even touch on here because of

[51:00] time, but I really suggest you take a look. For example, we have some hooks that let you listen directly for specific events. So if you're working with events, emitting events in your contract, you can already start listening for them in your front end and reacting automatically; you could build some fancy things with that. I think a next step for this explorer could be options to list and watch the events coming in as you make invocations; that could be very powerful. Great. Awesome. If you find anything or have any feedback, feel free to share it directly in the repository or get in touch with us. Yeah, what's the best way? Say I find a bug or something isn't working: what's the best way to let you know? I encourage everyone to open an issue directly on the repository, because we are definitely looking there every day; that will be the quickest way. But we're also on Discord, the developer Discord, the developer community; we're always there, so feel free to tag me

[52:00] or the team. We're always watching. Great. Okay, if there are no other questions, I'd like to thank you so much for giving this demo. It was a very extensive demo; you really took us deep into Scaffold Stellar, and it was really great. Thank you so much, and I think we will probably do another update at a later time. But until then, I would encourage everyone who builds on Stellar to try it out.
It's really impressive. I love how much is included, how much you don't need to set up manually; integrating the tooling and getting to a point where you're productive super fast, I think that's a great experience. So go ahead and try it out, and thank you for listening in. We will be back next Thursday. Thank you everyone.
diff --git a/meetings/2025-07-17.mdx b/meetings/2025-07-17.mdx
new file mode 100644
index 0000000000..82a2ca0f47
--- /dev/null
+++ b/meetings/2025-07-17.mdx
@@ -0,0 +1,178 @@
---
title: "Nomyx's Advanced Smart Contract Architecture"
description: "Nomyx introduces a Soroban implementation of the Diamond Pattern for modular, upgradeable smart contracts with shared on-chain storage, plus tooling for inspecting deployed diamonds and supporting complex deployments that exceed typical contract size limits."
authors: [carsten-jacobsen]
tags: [developer]
---

import YouTube from "@site/src/components/YouTube";

In this meeting the Nomyx team will give us an introduction to their advanced smart contract architecture (an implementation of the Diamond Pattern) and the Nomyx diamond proxy viewer interface, which facilitates inspection of deployed diamonds and makes them a bit easier to work with. They explain why upgradeability and modularity matter for production systems—especially when building tokenization and regulated financial infrastructure that must evolve over time without forcing painful migrations for users.

The team walks through how a diamond proxy routes calls to “facets” (modules) while maintaining a shared storage layer, enabling targeted upgrades (swap/replace a single facet) and supporting larger, more complex applications that would otherwise hit contract size and method count limits. They also showcase a live deployment of the diamond proxy standard currently used for the Nomyx platform, which removes limitations related to the upper contract size limit, allowing developers to launch complex products.

### Key Topics

- Motivation from Nomyx’s product goals:
  - Upgrade paths for real-world financial systems (bug fixes, policy/regulatory change)
  - Reducing friction for traditional asset managers adopting on-chain infrastructure
  - Treating blockchain as an implementation detail rather than a user-facing concept
- Diamond Pattern on Soroban:
  - A stable diamond proxy address as the user-facing entrypoint
  - “Facets” as modular contracts that each implement a portion of functionality
  - Proxy routes calls by function selector → facet contract address mapping
  - Upgrading by adding/replacing a facet instead of redeploying a monolith
- Shared storage model:
  - Facets share a common storage contract/pointer, avoiding storage migration on upgrades
  - Developer-facing access pattern via `env.shared_storage()` with persistent/instance/temporary handles
  - Values/keys must be XDR-serializable to fit the shared storage mechanism
- Deployment flow
  - Deploy via a diamond factory that creates a proxy and initializes shared storage
  - Use `diamond_cut` to deploy/init facets and update selector mappings
  - Macro-assisted facet initialization to inject shared storage pointers automatically
- Introspection and ops
  - “Loupe”/introspection capabilities to inspect selector → facet mappings
  - Emitted events on `diamond_cut` and a Rust-based indexer approach for monitoring/alerting
- Tradeoffs vs standard Soroban upgrades
  - Benefits: fine-grained upgrades, modular architecture, no forced user migrations, avoids code size/method count ceilings
  - Costs: higher initial setup and upgrade overhead, more moving parts and concepts to learn
- Demo discussion highlights
  - Example admin portal for identity/compliance workflows that can evolve without redeploy/migrate
  - Framing the shared storage as critical for preserving historical state and auditability while upgrading logic

### Resources
- [Nomyx Website](https://www.nomyx.io)
Video Transcript

[00:00] Hello everyone and welcome to this week's Stellar Developer Meeting. Today I have quite a few guests joining the call from Nomyx. Please introduce yourselves. Good morning everyone. My name is Chaved, CEO and co-founder of Nomyx, where we are building financial institution infrastructure on chain, on Stellar. We use the diamond proxy standard, which we will be talking to you about very shortly, and our entire objective is to allow asset managers to seamlessly transition on-chain without ever realizing that it's actually blockchain that's running the show in the background. I'll pass over to Thomas and Sebastian. Yeah, my name is Thomas Brown, here

[01:00] at Nomyx. I've been working on Soroban, a project that caught my eye as a native Rust developer; I've been doing Rust now for about 10 years. So far my experience working with Stellar has been incredible. I had experience working on other blockchains, and being a Rust developer, familiar with the benefits of the language, I feel like Stellar's system makes it really intuitive for a seasoned developer, and even those with less experience, to just jump on board. So this has been a really fun and interesting experience for me so far. Hi everybody, my name is Sebastian Shepus. I'm the CTO here at Nomyx and I'm excited to be here. We've been working on this technology for a while, and Thomas is one of the best

[02:00] Rust developers we know; we've had a really good time building this infrastructure. We hope you enjoy what you see and find it useful. Excellent. Mason, do you want to introduce yourself? Let me just finish my comments. Hello everyone; good morning, good evening, good afternoon from wherever you're joining. I'm Mason Heather, head of engineering at Nomyx. It's been quite a journey at Nomyx, and now we see Nomyx also working with Soroban and Stellar. My part here is to integrate that into our overall auto-deployer solution, where we onboard customers within a 20 to 30 minute time frame and

[03:00] deploy them onto their target chain; we're quickly wrapping and onboarding them onto chains as well. Great, thank you for the introductions. I really love tooling, and anything that can make your work as a developer, and deploying your smart contracts, easier is really interesting to me. I haven't had a chance to play around with your solution yet, so I'm super excited to see your presentation and demo. Sounds great. To start off, I believe we'd like to share a little bit about the motivation for why we developed the Soroban version of the diamond proxy standard. Like I mentioned earlier, our mission at Nomyx is to allow traditional asset managers and traditional finance projects to transition on-chain in a manner which completely abstracts away the complexity

[04:00] of blockchain technology, where they don't even realize that it's blockchain running the show in the background. We don't say the B word: there's no blockchain, no crypto; it just happens to run on blockchain-based infrastructure. One of the key challenges we saw when we were out refining our product-market fit was convincing these asset managers to transition on-chain, because everyone has their preconceived biases about blockchain technology: hey, it's immutable, and once something is deployed on chain, it can never ever be changed again.
Which is great when you're operating in DeFi land, where you've got pictures of monkeys on NFTs, or where you don't know me and I'm issuing a random token whose code you'll want to audit to make sure your tokens can't be stolen and you won't be diluted by new issuance. But that immutability is a double-edged sword: it turns into a massive

[05:00] liability for financial institutions that expect to update these smart contracts at a regular cadence, whether because a zero-day bug is discovered or because regulations change. As you all know, we're in the very nascent stages of tokenization as an industry, as a market segment; maybe only three to five years in, tops. And because of this immutability, these asset managers were very hesitant to adopt. When you do smart contract upgrades in the traditional sense, deprecating the old contract and transitioning everyone to a new one, it's a massive pain for the asset issuer and a poor user experience for the end user as well. So what we're deploying out into the world looks, feels, and smells like a traditional brokerage account, built on this diamond proxy standard. One of the key reasons that all of the tokenization initiatives

[06:00] to date, apart from very few exceptions, have happened behind walled gardens is exactly this: they want to carefully control the entire blockchain they're operating on, so if they need to roll something back or upgrade things, they have complete carte blanche to do so. But we all know there's inherent liquidity on public ledgers, and if you want to democratize access to various financial instruments and increase your investor base, you need to be able to tackle these public blockchains. This entire challenge is why we decided to build the diamond proxy standard on Soroban. I'll pause here very quickly for any comments from you, Carsten. Yeah, I just want to say that if anyone watching has questions, please feel free to post them in the chat and we will take them as they come in. But

[07:00] yeah, I think that was a great intro. Maybe just very briefly describe what the diamond pattern is, for those who are less familiar with it. Of course. The diamond proxy standard functions similarly to how proxies work in the traditional Web 2.0 world, if you're familiar with those. Instead of a monolithic smart contract structure, you have something called the diamond smart contract. Typically, smart contracts each have their own independent storage and are hard-coded to work with each other: if you've got a marketplace smart contract and a fee distribution smart contract, all of them are linked together in a hard-coded manner, for lack of a better word. What the diamond proxy allows you to do is have a common storage for all of these smart

[08:00] contracts, with multiple facets on that smart contract, where each facet is responsible for a specific kind of functionality. Now, at a 10,000-foot overview, and Thomas can get into this a little deeper:
when you need to change anything at all in a diamond proxy, you're simply able to shave away a facet and replace it with a newer one, without impacting the rest of the contract's functionality and without costly reissuances and migrations of tokens. Great. Okay, so I'll pause for any high-level questions, and then I'll let Thomas take it away and get down into the nitty-gritty. Sounds good. There was one question from the audience asking for a link to the standard. Did you want the GitHub repo link of our SEP

[09:00] filing? Because that's what we're going to go over next. Yeah, I can post that one. Okay, there we go. Yeah, we are proposing a SEP; we filed it a couple of months ago, and now we're ready to get additional eyeballs on it from the community and have it start to be adopted throughout the Soroban ecosystem as well. We'll also be going through OpenZeppelin audits of the smart contract in very short order, over the next month or two. So, Thomas, take it away; share your screen whenever you're ready. Great, just let me get this set up.

[10:00] All right, the diamond proxy standard. Before I get into the jargon used here, because somebody looking at this for the first time will be inundated with all these new keywords, I think that, as a developer speaking to other developers, it makes the most sense to approach this from a common perspective. Typically, when we're developing a smart contract, we can think of it like a class of sorts: it's something that runs and is structured; you can add functions to it, you can add data fields to it, and you can control how those data fields are accessed through different view modifiers. In Web 3.0, in the blockchain space, instead of having a class just running

[11:00] on your local machine, which is the case for normal object-oriented programming, in paradigms like Stellar or Ethereum we have a distributed class: that class, that code, is executed across many machines at once. And so there's a beauty and elegance to the notion of smart contracts. Now, in order to achieve that, you have to have consensus, and there has to be a pinned location where that smart contract exists. That's one of the reasons we have to have the diamond proxy: it's great that you can go ahead and deploy a smart contract, but we quickly run into

[12:00] a problem in the Web3 space. Say you have some business logic and you deploy it. A month down the road, a manager approaches you and says, "Hey, I would like this functionality changed." Well, in order to do that, you're going to have to (a) redeploy, and (b) change the pointer to the smart contract on chain and tell anybody using your system: okay, don't target this contract address anymore, target this one. You could have some kind of layer that automatically reroutes all of that for you, and people did that for some time. But what if there were a way to proxy and reroute calls on the blockchain itself? How can we

[13:00] go from something that is intrinsically immutable to something that is actually mutable? It seems a little like magic, but it really isn't, and therein lies the beauty of the diamond proxy: it is simple, yet seems like magic.
With the diamond proxy, what you have is several stable points in the decentralized memory. Those stable points are your proxy and your shared storage layer. As far as a user is concerned, their interactions target the diamond proxy. Let's assume a very simple case: I want to deploy a smart contract. What I do is make sure the diamond proxy is deployed,

[14:00] and then I deploy the smart contract, not directly, but through the proxy. The diamond proxy, through a function called diamond_cut, deploys the smart contract, which we call a facet, and initializes it. When it initializes the facet, on Stellar we have a macro within our source code that lets you seamlessly integrate with this diamond proxy standard, such that it injects another stable point in memory: the contract address of the shared storage layer. So whenever a facet is deployed through the proxy, it gets a pointer to that shared storage.

[15:00] What does this mean? Let's say you deploy a facet, and later you update the logic. Because you have a shared storage layer, it doesn't matter that you now have two separate smart contracts: facet A and facet B can still access the data that you had mutated within the ledger. That's just great. We really want to give people the freedom to change logic without the hassle of dealing with the issues that come with immutability. And in reality, this sort of functionality is what you want anyway, because, as any developer knows, you're never actually truly done with a

[16:00] program. You can get 95% of the way there, or 99%, but there's always a part of you that feels you could improve it somehow. And indeed, in the world of business, with business logic and changing business requirements, you can't have that immutability in the business logic: you have to have breathing room for it to change. So the diamond proxy standard gives us the ability to go from a complex, immutable setup to a very straightforward approach to maintaining code bases that can be arbitrarily complex. So let us consider a facet with a function we want to execute.

[17:00] Typically, using Soroban, you would invoke the function directly on this smart contract, which in this case we're calling facet A. But when you're using the diamond proxy standard, you don't call the function directly. Instead, you call the function through the diamond proxy, and within the jargon of this framework, function equals selector for all intents and purposes, so you may hear the two used interchangeably when studying the diamond proxy pattern. You run the function through the proxy, and the proxy internally looks at a set of selectors; each selector is associated with a contract address.

[18:00] Because it knows where that contract exists, it then proxies the call to the associated facet, whether that's facet A or facet B. Let's say facet A has two functions, alpha and beta, and you want to update function alpha. Instead of redeploying a version of facet A with just alpha changed, you could take a very targeted approach and deploy facet B, and facet B is much leaner, because it contains just function alpha.
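To make the routing concrete, here is a conceptual Soroban sketch of the selector-to-facet lookup being described. This is not the SEP's actual implementation, just an illustration of the idea: the proxy keeps a mapping from selector to facet address, maintained by diamond_cut, and forwards calls through it:

```rust
use soroban_sdk::{contract, contractimpl, symbol_short, Address, Env, Symbol, Val, Vec};

#[contract]
pub struct DiamondProxy;

#[contractimpl]
impl DiamondProxy {
    /// Conceptual routing: look up which facet currently implements
    /// `selector` (diamond_cut keeps this mapping current), then forward
    /// the call to that facet and return its result.
    pub fn route(env: Env, selector: Symbol, args: Vec<Val>) -> Val {
        let facet: Address = env
            .storage()
            .persistent()
            .get(&(symbol_short!("selector"), selector.clone()))
            .expect("no facet registered for selector");
        env.invoke_contract::<Val>(&facet, &selector, args)
    }
}
```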
So the gas costs associated with deployment are going to be cheaper. And when you call it, the proxy reads the most recent version of these selectors from storage. So

[19:00] before the upgrade, when you call function alpha, it hits facet A; once you upgrade, a call to function alpha is routed to facet B. Now what about function beta? Function beta doesn't exist in facet B; we deployed only function alpha there. Function beta still exists in facet A, so the diamond proxy will continue to route it there. As you can see, there is a lot of room for flexibility. If the developer prefers, they could still deploy a version of facet A that has both alpha and beta, where beta is unchanged but alpha is new; that just means that when we call functions through the diamond proxy, it's almost as if the old facet doesn't exist: its functions are simply no longer called. But

[20:00] that's not to say that, if earlier calls to facet A mutated some information, you can't access that mutated information from facet B anymore. You actually still can. So you can imagine a scenario where, as time goes on, you accumulate more and more facets over here on the right, and you could have a very complex program if you wanted to. And going back to the analogy with object-oriented programming and classes, even from a non-upgrading perspective you could deploy different facets as different modules or namespaces within your application, to have a strict separation of concerns. So not only does this give you

[21:00] that upgradability, it also gives you a clear differentiation between the different pieces of logic that make up your program. From a bird's-eye view, this helps consolidate your software's architecture, and, as we just discussed, it helps you upgrade something that would otherwise be challenging to upgrade. There's a second part to this that ought to be discussed, and that is the diamond factory. The diamond factory's job is just to create diamond proxies. So why would we do that? Why not just deploy a diamond proxy directly? Well, you could, but there's a little bit of a setup phase, just like there's a

[22:00] setup phase with facets. With facets, you have an init function which receives a pointer to the shared storage, and it's similar with the factory: when it deploys a diamond proxy, it ensures that the diamond proxy has everything it needs to initialize itself. Internally, it sets up that stable pointer to the shared storage smart contract, which abstracts away any setup you'd otherwise have to do. So as a user, you're going to use the diamond proxy, but you first start off with the factory: you use the factory to deploy the diamond proxy, and once you're at that layer, you can just treat it as normal; you never have to touch the factory again if you don't want to. Or

[23:00] you could use the factory to create another diamond proxy, which in turn will have its own shared storage address, and so on and so forth: you can use the factory over and over again. As it relates to Soroban, there is another feature of this SEP filing, and that's the storage helper. Typically, when we're developing on Soroban, we have access to three different types of storage: instance storage, persistent storage, and temporary storage.
So you would do something like env.storage().instance().get(...), or something like that, and the same for temporary.

[24:00] Well, I wanted this to have a very similar paradigm. So if you want to go from instance storage to shared instance storage, you use the shared-instance or shared-temporary handles, and so on; it creates a very familiar pattern for the developer. There's only a slight difference, and that's that whatever you're putting in, whether it's a key or a value, has to live within the storage of a diamond proxy, which knows where the selectors and all the metadata are: it has to be serialized to the XDR format, which, as Soroban developers know, is the encoding format we use for

[25:00] network transmission. There's another component to the diamond proxy, and this is more of a meta feature: the loupe facet. In EVM land they call it a loupe facet; there it's a smart contract. Here it's different: we leverage Soroban's inherent capabilities and it sits within the diamond proxy contract itself. If you want to know the address for selector alpha, it will return to you the address of facet A, or, after an upgrade, the address of facet B. So the loupe storage allows us to

[26:00] introspect upon the diamond, and it also stores the state of the diamond itself. Whenever one performs diamond_cut, which, as we recall, deploys a facet, that gets recorded within the loupe storage. Now, did anyone have any questions? Yeah, there were some questions. If we go back a little bit, let me see; I think there was a question about when data is being stored.

[27:00] Oh yeah, Silas asked: it sounded like storage isn't written to until the ledger closes in this setup, if that makes sense. Correct. Yeah. Let's see what else. I think Matias had a question too. I think we can continue. Okay. I don't know if you

[28:00] wanted to add anything to this; if not, I'll just move on to deployment flows. No, no, that was great. Thank you, Thomas. Okay. All right. So, we've already discussed this a bit, but just to visualize what's going on: you start off, you're the user, and you want to ensure that you have your diamond factory. Oh, sorry to interrupt, but Silas had a question: he said that you would access storage as storage shared. Yeah, let me see; oh, here we go.

[29:00] So we have a pattern here that's pretty much just like before. You do env.shared_storage().persistent(), and I believe I worded it incorrectly earlier, but the idea is that you have a drop-in replacement: instead of env.storage(), which is what we typically use, we use env.shared_storage(), and then you call persistent, instance, or temporary on that handle. Okay. Yep. And the other important thing to consider is that when you have that persistent shared storage handle, you need to make sure that whatever you're putting in is serializable to XDR. Yeah. Then Silas also mentioned that

[30:00] this, the env shared storage, would require a CAP. But this is a CAP proposal as well, right? So, if required, we could make one.
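For illustration, the drop-in pattern described at [29:00] might look like this inside a facet. To be clear, this `shared_storage` API belongs to the SEP proposal, not to today's soroban-sdk, and the names below follow the pattern as described in the talk:

```rust
// Proposal-level sketch: `shared_storage` mirrors `storage`, but is backed
// by the diamond's shared storage contract rather than the facet's own
// storage. Not available in the current soroban-sdk.
use soroban_sdk::{symbol_short, Address, Env};

pub fn record_owner(env: &Env, owner: Address) {
    // Same persistent/instance/temporary handles as env.storage();
    // keys and values must be XDR-serializable.
    env.shared_storage()
        .persistent()
        .set(&symbol_short!("owner"), &owner);
}
```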
We've been told so far that what we have here is a starting point: for that shared storage layer, yes, we're going to need it ingrained within the ecosystem, and an incrementalist approach makes the most sense. But yes, it will require changes to multiple parts of Soroban. Okay, great. So: you have the diamond factory, and the factory just creates diamond proxies. The user calls deploy_diamond;

[31:00] the factory deploys the diamond proxy, and then the proxy deploys the shared storage layer. So now you have a diamond with no facets. Next, you want to add a facet; you want to start adding architecture to your software. You call diamond_cut: diamond_cut deploys the facet and then calls the init function on that facet. Keep in mind that this init function comes from a macro in Rust: you add it right on top of the contractimpl macro and it injects the init function for you. That way, when diamond_cut is called, it passes a pointer to

[32:00] this shared storage layer. The next step, after the facet is initialized, is updating the selector (or function) mapping. That way, whenever somebody attempts to invoke a function, as we see next, the proxy performs a lookup within that selector mapping. So we invoke the contract; after the call goes through the proxy, we might set some storage, or get, update, or delete; then we return the result, and the result has to be XDR-encoded. That's the additional requirement, which is typical anyway; you're just adding an additional layer, which is why it's

[33:00] considered a proxy. Are there any questions about this deployment process and the life cycle of working with diamonds? I think Nilesh had a question earlier about how you would design an upgradeable contract while managing storage efficiently. Nilesh, I think we address that later in the presentation, right, Thomas? Yeah, we do. And apart from that, how would you design it? Usually you have to migrate the storage; it's a similar concern to traditional Web 2.0 development. Yes, the same question came up again: how would you handle storage? Excellent question, and the entire intent of the diamond proxy standard is that you don't have to manage smart contract storage migration. Yeah, that was also my takeaway.

[34:00] Great. All right. So I'll go down and compare what we have here, the diamond proxy, to what already exists in Soroban. In general there are pros and cons, and what one chooses depends on one's requirements and preferences. I'll start with the first one, which I think we've already discussed: the notion of granularity, the ability to define the design of our software in very compartmentalized terms, or even uncompartmentalized terms.

[35:00] In Soroban, if you want to update a smart contract, you have to call a function that replaces the entire smart contract, and for some people that's all that's needed. But if you have more complex requirements and you don't want to deal with the headaches of, say, migration, which is the next comparison here, then you're definitely going to want the diamond proxy pattern. As discussed, the shared storage layer means that no matter how many times you upgrade your facets, they always have a pointer to that shared storage layer.
And with Soroban, when you replace a contract, you're going to have to manually manage + +[36:00] how you access that data and where that data is going to exist. Those are things you have to consider. But with the diamond proxy pattern, in Stellar terms, you don't have to worry about that. You can just act like the database is always there; it's abstracted away. Next, the code size limits. In my opinion, the benefit of the diamond proxy in terms of code size limits is that you can have a very small set of code and just make incremental changes. So you deploy a facet, then you deploy another facet, and you keep things exactly as needed. You could also do a monolith; a monolith is completely acceptable, and the diamond proxy + +[37:00] can certainly handle that. In Soroban you're limited by the maximum size of the contract code, which is fairly large. But with the diamond proxy pattern, let's say the limit on Soroban is n and your program is 2n. What you do is split it across several facets: the first facet is of length n in bytecode, the second facet is also of length n in bytecode, and you put different functions in each. When you call the diamond proxy, it's going to proxy the call to the right smart contract address. So you completely get over that code size hurdle. And by extension, the same + +[38:00] principle applies to the method count. Now, the number is still pretty high for both, but nonetheless, just imagine a typical codebase nowadays that may have thousands and thousands of functions. With the diamond proxy pattern you can accommodate that, and you can have very large programs. Theoretically, if you wanted to, you could have a diamond proxy pointing to hundreds if not thousands of facets that in total are gigabytes in size. It'd be very interesting, as a case study, to see somebody go forward with that. And really, the sky's the limit there, and I think people will do some very creative things with this. We've only just touched the surface here. And with the performance, security, and stability + +[39:00] that Rust provides, I really think that, in contrast to EVM, Rust developers are in my opinion not only more seasoned, but they pay close attention to the fine details. They're very good at what they do, and that's reflected in how elegant Stellar is, and the diamond proxy is complementary to all of that. It's a simple and elegant solution which opens up the sky for everybody, and Stellar, it's in its name: I think Stellar is meant to be married with the diamond proxy, because in a way it's what allows access to those higher realms, so to speak. The other comparison between the two is modularity, and we've already talked + +[40:00] about this. You can design your software so that you have different categories of your business logic in different facets. You can visualize it as: you have facet A, and then you have facet A1, which is an upgrade to facet A; in parallel you have facet B, facet B1, and so on and so forth. So you can independently upgrade these parts and still keep the business logic separated. And it's clear you have the ability, through loupe functionality, to introspect upon these functions and their addresses.
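The dispatch step mentioned above, proxying a call to whichever facet owns the selector, could look roughly like this, reusing the hypothetical `facet_address` lookup from the earlier sketch:

```rust
use soroban_sdk::{Address, Env, Symbol, Val, Vec};

/// Forward a proxied call to the facet registered for `selector`.
pub fn dispatch(env: &Env, selector: Symbol, args: Vec<Val>) -> Val {
    let facet: Address = facet_address(env, selector.clone())
        .expect("selector not registered in the diamond");
    // Cross-contract call: arguments and the return value cross the host
    // boundary XDR-encoded, which is the extra requirement noted earlier.
    env.invoke_contract(&facet, &selector, args)
}
```

However large the overall program grows, each facet only has to stay under the per-contract limits, since the proxy resolves one facet per call.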
There's no notion of that directly with a plain smart contract; you can of course get the ABI, but + +[41:00] that's different. With introspection, this is live, through actual transaction calls, and keep in mind that every time you call diamond_cut, which deploys a facet, the diamond proxy is recording that in storage. So introspection just gives you back the information it's been recording. Okay. So it sounds like everything's good and all with the diamond proxy, but as I mentioned from the very beginning, there are indeed pros and cons. One of the cons is that when you're setting up the diamond proxy, you're also going to set up the diamond factory, and the diamond proxy is going to have to set up shared storage. Now, this is really just a one-time thing, so you have a constant initial cost. And then you also have costs associated + +[42:00] with deploying a facet. On Soroban, if you want to deploy a smart contract, it is direct; here it is indirect, because we have to call diamond_cut. Diamond cut has to ensure that the bytecode exists on the blockchain, and then it has to run the functions to initialize it, which also includes passing in that shared storage address. So because of that, yes, there is not only a constant initial cost, but also a constant overhead cost associated with each call to diamond_cut. On Soroban you don't really have that issue; it's just a one-to-one deployment. + +[43:00] It's not one-to-two or one-to-three smart contracts being deployed; it's just one-to-one. And to be clear, the one-to-multiple deployment only occurs in the very beginning, when you set up the factory and the shared storage. Once you have that set up, you don't have to reinitialize the shared storage; you just pass a pointer to it. So really, you're just deploying the facet smart contract through the diamond proxy. But one of the key benefits of using the diamond factory is, let's take the example of a DEX pool, right, where you know you have to have the same contract replicated multiple times. If you're using the diamond factory, that will reduce your overall cost in the long run, because instead of having to deploy the entire contract from scratch, you're calling the diamond factory, whose + +[44:00] bytecode already exists on chain, to deploy those contracts. Thomas, correct me if I'm wrong there. Good. Yeah. Want me to continue? Yeah. No, no, did you guys not hear me? Oh, yeah, we heard you. Okay, cool. Yeah, go ahead. We just discussed that there is a bit of complexity, and yeah, that does entail that the transaction costs are going to be higher here, and the costs are going to be lower on plain Soroban each time.
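On the factory point above: deploying from a Wasm hash that already lives on the ledger is what keeps repeated deployments cheap. A hedged sketch of a minimal factory entry point, assuming a recent soroban-sdk where `deploy_v2` is available (the function name and arguments are otherwise illustrative, not the presenters' code):

```rust
use soroban_sdk::{contract, contractimpl, Address, BytesN, Env};

#[contract]
pub struct DiamondFactory;

#[contractimpl]
impl DiamondFactory {
    /// Deploy a new diamond proxy from Wasm already uploaded to the ledger.
    /// Only the 32-byte code hash is referenced, so repeated deployments
    /// never pay to re-upload the contract bytecode.
    pub fn deploy_diamond(env: Env, wasm_hash: BytesN<32>, salt: BytesN<32>) -> Address {
        env.deployer()
            .with_current_contract(salt)
            .deploy_v2(wasm_hash, ())
    }
}
```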
The next point is the development experience, and this really varies. In my opinion, there will be an initial higher overhead, + +[45:00] simply because you're going to see all this jargon. What's a facet? What is loupe introspection? What is the diamond factory? What's the diamond proxy? What is all of this? At first it might seem like a fire hose, really overwhelming. But if I draw our attention back up here, it's actually really simple. And once one goes through the process of using this and seeing it in action in code, it becomes something that is just intuitive and simple. So there is that initial hurdle, and that is why, at the very beginning of this presentation, I said let's set aside the jargon and talk about this in terms we're all familiar with: the notion that we have a class in object-oriented programming and we want to be able to mutate it. How can we do that in the web3 space? So I think, + +[46:00] going with that approach when teaching this to others, as you guys continue to learn more about this and actually apply it, just sticking to a simple analogy is the best approach. Less is more, certainly, here. It should not be something that one overthinks. I know I certainly did when I first looked at this, but at the end of the day it's fairly straightforward. And in terms of state sharing, again, we already discussed this, so I think I will omit that one. Are there any questions about the paradigm of the diamond proxy being brought onto Stellar, or any logistical questions? Anything, we'd be happy to answer. + +[47:00] Yeah. Matias had some questions about large systems built on this: how do you handle DevOps and monitoring, etc.? And yeah, he says, I'm guessing you would also offer at least a tool to handle the orchestration. So, in terms of DevOps: if you want to be able to understand what's happening, let's say you have a very complex program and smart contracts may be dynamically deployed based on criteria, you need to be able to monitor that. That is an excellent question, and it is one of the reasons why we have the diamond loupe: it allows us to introspect. But there's another part to this SEP filing, and that's that when you perform a diamond + +[48:00] cut, events are emitted, and for those emitted events we have built, in Rust, a Stellar indexer that captures them. So if you're wanting to do DevOps, you're wanting to make sure everything is good, that there aren't issues, and if there are issues you want to see them right away. You have an indexer running. You pass the indexer the contract address of the diamond, which as we know is a stable address; it's not going to change. Your indexer will then listen to events, and you will have some application that listens for specific types of those events, and if certain criteria are met, you can send an alert to PagerDuty or whatever software you decide to use. + +[49:00] So really, it's through indexing and listening, the diamond loupe functionality as well as listening to those emitted events, that we open the door. As for whether we provide an actual toolkit that allows one to monitor their diamond: we don't have an actual program to do that. We have some components that allow one to build towards that, and I think that, as a community, working towards that would be a very important next step. Because at that point we open the door to organizations, very large ones in fact, that want to build out complex software. They don't have to worry about the immutability of the blockchain, as we discussed; they just hop on board, and they already have this open-source toolkit that does all this monitoring + +[50:00] for them. It's certainly something that can be done. There are no limitations there. Good question.
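A minimal sketch of the event-emission half of that monitoring story, with an assumed event shape; the real topics and payload would be whatever the filed proposal specifies:

```rust
use soroban_sdk::{symbol_short, Address, Env, Symbol, Vec};

/// Publish a diamond-cut event for off-chain indexers to capture.
/// The topic and payload layout here are illustrative assumptions.
pub fn emit_cut_event(env: &Env, facet: Address, selectors: Vec<Symbol>) {
    // topics: ("dmnd_cut", facet address); data: the selectors now routed to it
    env.events()
        .publish((symbol_short!("dmnd_cut"), facet), selectors);
}
```

An indexer then filters the event stream by the diamond's stable contract address and raises alerts on whatever criteria matter to the operator.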
Yeah, there's another question: do you have a demo app built on Stellar that uses this? Yeah, we do. Okay. Yep. Before we jump into the demo app, Thomas, maybe it would also be good to show them the diamond loupe tool that we built for Stellar to introspect this, and then jump into the app. Oh, for this presentation, as we discussed, we'll just go straight into the other app, but we do have a link to the repository to show that, and it is getting short on time anyway. So we will go straight into the app. No worries. Sounds good. Okay. + +[51:00] Thomas, a question by Silence there: if you made an app, there must be a contract to handle the shared storage layer, correct? Because he feels like he's having a fundamental misunderstanding. So, you don't even have to worry about the shared storage layer; that's abstracted away. It's just that if you want to access storage in Soroban, you're not calling env.storage().persistent(); you're doing env.shared_storage().persistent(). That's the new pattern. So it's very similar. Yeah. Any other questions? + +[52:00] Okay, thanks, Silence. Once you review it, I think it'll make itself clear once you actually have a peek at the SEP. But go on, Thomas. Yeah. So, as we're developers, I'm just going to draw our attention to what this application looks like from a more programmatic view. We have the diamond factory that we talked about; this is what we use to actually deploy. And we have all these facets that we've deployed, and these are going to be used in a way that allows us to keep track of KYC. So in this application we have a set of compliance rules. We have some, say, base requirements that a person's + +[53:00] identity must pass, and extended KYC requirements. And then somebody could come along and say, "Well, hold on. I was just told by some regulatory agency that we need rule 54b or something." So we go ahead and add that in. We now have the set of constraints that one must pass in order to pass KYC verification. You have that on the ledger. Right now the UI says Stellar testnet, but really it's pointing to a local Quickstart Docker image for development purposes. You have trusted issuers. In this case we just have the company + +[54:00] name; the trusted issuer has an address, and they have defined a set of compliance rules. And if you look here, we have these rules, and say we want to add rule 54b: we can go ahead and do that, and we update the trusted issuer's information. Now the trusted issuer manages these compliance rules. We have identities; these are individuals. In this case, several of us on the call, Uber and Sebastian, are identities here. And if somebody wants to go through the KYC process, they would come here and add in information that allows us to + +[55:00] keep track of what's going on. So the organization admin (this is called the admin portal, after all) can keep track of this in a way that increases public trust. As far as how this relates to the diamond proxy: well, you can just imagine a case where we want to change the data structure. Let's say we want additional columns here in the data. If we just had an immutable smart contract, we couldn't actually change it. We would have to do several things: create a new smart contract, redeploy it, take all the old data from the first smart contract and copy it over, and that's not fun. But with this approach, you can go ahead and upgrade it, and it's still going to point to the same information.
+ +[56:00] The only sense of migration, really, is just you keeping track of, okay, am I changing the shape of the data internally in any way; and that comes down to the burden, as always placed on the developer, of considering carefully how their data structures inform the architecture of the overall program. Right. So I just wanted to address some questions that came up. There was a question by Green here about us validating the demand with Stellar developers. No, we didn't; the target audience for this wasn't initially Stellar developers. Once we actually showcased this to the internal foundation team, they were excited about it and wanted us to showcase it to the development community, to be used in other RWA tokenization frameworks. Where we did see significant demand was from asset managers + +[57:00] themselves. If you're asking someone to do RWA tokenization, and you're asking them to transition millions if not billions of their assets on chain, this is something they felt comfortable getting behind. And secondly, with respect to Nomics ID: what Thomas just showcased to you is a digital identity framework. We'll also be filing a SEP for that, creating a standard for Nomics ID on the Soroban network. What Nomics ID would allow you to do is create digital identities on chain, so that you can then enforce these compliance rules on a token. You just cut off. Oh, hello. Can you hear me? Oh, yeah. Uber, we can't hear you. I can hear you. Oh, okay. Yeah. So what it would allow you to do is create these digital identities and collect the appropriate documentation that you need against each one of these + +[58:00] compliance rules being set against a particular identity. And then (we're running short on time here, but I'm happy to showcase this demo at a later stage) when you actually go to create or issue tokens that are part of this diamond proxy standard, which also includes the identity storage as well as the token storage itself, you're actually able to superimpose prerequisite compliance rules on individual tokens. So it gives DeFi a somewhat centralized flavor, which is mandated by regulators in order to facilitate the transition of RWAs on chain. Right. So, to answer your question in a roundabout way, Green: the entire intent was, if we want asset managers that have not interacted with blockchain technology, that have no clue about the underlying tech, but want to get on board with the tokenization bandwagon and come into RWAs, telling + +[59:00] them that their contracts are actually upgradable in a very modular manner gives them peace of mind, while still preserving the audit trail of the transactions that already occurred in the previous iteration of that contract. Great. Yeah, I think it makes a lot of sense. This may not be a good fit for all projects, but I think you had some good use cases where it makes a lot of sense to build it this way. Right. No, it wouldn't be for all use cases, because otherwise you are adding unnecessary complexity; unless you're creating a DeFi protocol that is more experimental and you're expecting to update it on a regular cadence, maybe that would be a good fit. But if it's just a one-and-done deployment project, yeah, you're better off just doing it the old way. But if you are doing RWAs, and if you are trying to win clients in the RWA space, giving those clients this peace of mind,
because legislation always + +[01:00:00] trails innovation. There are going to be more legislative rules and policies coming into place that these smart contracts will eventually have to comply with. Yeah. Great. I don't know if there are any last-minute questions before we end here. Yeah, now you get it. My apologies, we should have started with that. The entire purpose of the shared storage is this: if you upgrade a contract and you tell me all of my historical transaction data is gone, the SEC is going to be banging on their doors and it's game over; they will never adopt. This is a way to soften the blow for asset managers and bring them on chain. Yes. By a whole lot. Yeah, because before that they didn't even want to talk to you. Neil, you said you have a question. What is your question? Is it related to the topic here? + +[01:01:00] Yeah, I don't think we have any more questions. Well, oh yeah: "learn a new language." Okay, it's more of a statement than a question. Yeah, I agree 100%: learn Soroban. Yeah, great. Thank you so much for joining today. This was super interesting. I'm looking forward to seeing the progress and how this will play out. There are definitely some use cases where I see this having massive benefits. So yeah, super exciting. Thank you all for joining, and thank you to everyone who viewed this live stream. We'll be back again next Thursday. Thank you. Bye. Bye. Thanks, guys. And look out for comments from both Wald and Silence. Thank you, + +[01:02:00] guys. Thank you. Have a good one, guys. See you. Take care. Bye. + +
diff --git a/meetings/2025-07-24.mdx b/meetings/2025-07-24.mdx new file mode 100644 index 0000000000..4d4c6ecffa --- /dev/null +++ b/meetings/2025-07-24.mdx @@ -0,0 +1,126 @@ +--- +title: "PaltaLabs' Stellar Hacks" +description: "Discussion with PaltaLabs on the Stellar Hacks hackathon, focusing on building with Soroswap and DeFindex, including smart contract architecture, APIs, and ideas for hackathon projects." +authors: [carsten-jacobsen, esteban-iglesias] +tags: [developer] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session features a conversation with Esteban and Francisco from PaltaLabs about the ongoing Stellar Hacks hackathon and how developers can build on Soroswap and DeFindex. The discussion walks through the motivation behind the hackathon, what participants can still work on, and how PaltaLabs’ tooling lowers the barrier to building DeFi applications on Stellar. + +The team provides a practical overview of Soroswap as a DEX and aggregator on Soroban, along with DeFindex as a vault and strategy framework for yield and savings products. The meeting emphasizes real-world developer workflows, available APIs and SDKs, and concrete ideas for hackathon submissions. + +### Key Topics + +- Overview of the Stellar Hacks hackathon with PaltaLabs and remaining time to participate +- Soroswap architecture: + - AMM for creating and managing liquidity pools + - Aggregator smart contract to route trades across multiple AMMs + - Public routing API to compute optimal trade distribution +- Using Soroswap’s router contract for swaps, liquidity management, and pool creation +- Trade splitting across protocols and how the routing API supplies optimal paths +- Open-source Soroswap indexer built with SubQuery and GraphQL for analytics and bots +- DeFindex overview: + - Vaults as user-facing savings accounts + - Strategies as pluggable smart contracts connecting to DeFi protocols +- Example DeFindex strategies, including auto-compounding and blended yield approaches +- Vault roles (manager, rebalancer, fee receiver, emergency) and safety constraints +- Ideas for hackathon projects: + - Smart wallets and integrations + - New DeFindex strategies + - Rebalancer bots and automation + - LP token use cases and staking designs +- APIs and SDKs for both Soroswap and DeFindex to simplify frontend integration +- Developer support via documentation, Postman collections, and community channels + +### Resources + +- [Hackathon](https://dorahacks.io/hackathon/stellar-hacks-paltalabs) +- [Soroswap Documentation](https://docs.soroswap.finance) +- [DeFindex Documentation](https://docs.defindex.io) +
+ Video Transcript + +[00:00] Hello, welcome everyone to this week's Stellar Developer Meeting. Today joining me are Francisco and Esteban from PaltaLabs. PaltaLabs should be familiar to any of you who've been in the community for a while; they have been building on Stellar for, yeah, way longer than I have. So welcome, guys, and yeah, please do a quick introduction, and then we'll get started. Great. Thank you, Carsten. My name is Esteban. I have been building PaltaLabs since 2022, when we met all the Stellar team back at Meridian in Rome. Since then we have been building Soroswap and DeFindex, and in this presentation we will explain a bit more of that with Francisco. + +[01:00] Yeah, I'm Francisco, CPO of PaltaLabs, and we are excited to present the hackathon and motivate everyone to build here. Yeah, as a quick comment: Francisco and I are currently at Draper University in San Francisco. So I would encourage all developers to join all the programs, like the grants programs that Stellar has, because of the opportunities you can get, like we are getting now; currently I am in the dorms of Draper University. The opportunities are really great. Yeah, DraperU is a program, a collaboration between Stellar and Draper University, that we started last year and are continuing this year, where you guys stay for three weeks and + +[02:00] go through a really intense program with lots of different things, everything from business to, yeah, the financial parts. It's a great program, but that's not what we're talking about today. You're running a hackathon with Stellar Hacks, and I think, to lead up to talking about the hackathon, the idea is that the participants are building on either Soroswap or DeFindex, or both. So maybe we start with an introduction: what is Soroswap and what is DeFindex, and what can you build on them? So yeah, please take us through the basics of both products. Thank you, Carsten. Please, Francisco, I will share the screen. + +[03:00] Great. So, Stellar Hacks: with Stellar Hacks you can swap and build vaults with PaltaLabs' Soroswap and DeFindex. So let's talk a bit about Soroswap, and then we will talk a bit about DeFindex. If you have questions, I think you can write them in the chat, right? So I will stop if anyone has any questions, and maybe we can share more of our screens and take a look at the code, etc. Soroswap is a DEX and a DEX aggregator on the Stellar blockchain. As a brief introduction, it was the first DEX built on Soroban, on the smart contract side of Stellar, and we aggregate liquidity from multiple liquidity sources, multiple platforms. So on the right you + +[04:00] have a swap that gets routed through the Soroswap, Aqua, and Phoenix AMMs. In order for you to understand the tech of Soroswap, so you can have more ideas on how you can hack and build on top of it, you need to understand that Soroswap has three components. There is the AMM; then we have the aggregator, which is a smart contract; and then we have a routing API. Our routing API is the one that will optimize your trade, which will be executed on the aggregator, and if part of the trade goes through the Soroswap AMM, it will go through our pools. The Soroswap AMM is where you can create liquidity pools. If you want to do a simple swap, or if you are building a + +[05:00] protocol that needs a liquidity pool, you can create a liquidity pool on our AMM.
It's a classic Uniswap-V2-style AMM. And we have a router contract that does everything for you: you can create new pairs through the router contract, add liquidity through the router contract, and remove liquidity and swap, all through only one contract. So you only need to understand this one router contract. We will put the links later, but it's the core repository under github.com/soroswap; all the core smart contracts are in that core repository. Now I am reading the comments, sorry. Yeah, hacks. Yeah, let's go. Great. And then we have the aggregator. If you want to aggregate multiple protocols, we + +[06:00] already built a contract that handles all those transactions, so you don't need to think about Phoenix or Soroswap; you only need to interact with one contract. And inside that aggregator contract we have all the adapters that will help your transaction go through all those different AMMs. Next slide, please. So this is how a transaction looks. It's simple: you just put the token in, the token out, the amount you want in, and the minimum amount you want out. But the tricky part is this DEX distribution: how you distribute your trade between different exchanges, between different DEXes. And this is how, yeah, no, next one. + +[07:00] So you will need to create this object, but you don't need to calculate it yourself; we have it in our routing API. Yeah, one back, sorry; yeah, this one. This is how the transaction looks on the Stellar explorer. You put how many parts of one trade you want to go through protocol zero, which is Aquarius, and how many parts you want through protocol two, which is Soroswap. Yeah, any questions until now? No. So through this aggregator you can touch all those liquidity sources. It's only one contract that your app, or whatever you are building, needs to interact with, and this contract will execute all transactions and do all the subcontract calls necessary for + +[08:00] that trade to go through. So how do you get this optimal vector of DEX distribution? You can calculate it yourself, or you can force it; you can force it to go through specific protocols, that's also a possibility. But if not, you can use the API that we have, and this is the third part of the Soroswap tech stack. We have a public API that anyone can use, and we created an API key for those that are building in the hackathon. So you can experiment with our API, and the cool thing is that you just put in what you want to trade and which protocols you want to include, and it returns you the best way to + +[09:00] split the trade. In the description of the hackathon there is a link to our Postman collection. If you want to talk with the API, go and check the Postman collection, because it will make it much easier for you to interact with the API. Next one. Yeah, this is all the documentation you need. We also have an SDK (I forgot to put it in this slide): the Soroswap SDK, an npm package in TypeScript. So if you are building in TypeScript, you don't even need to build all those API calls; there's a function for each one. And on the Telegram group we are sending example repositories of projects that use + +[10:00] our APIs.
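To make that distribution object concrete, here is a hedged sketch of roughly what the aggregator's input could look like as a Soroban contract type. The field names and encoding are assumptions for illustration; the published Soroswap contract types are the authority here.

```rust
use soroban_sdk::{contracttype, Address, String, Vec};

/// One slice of a trade, routed through a single protocol.
#[contracttype]
pub struct DexDistribution {
    pub protocol_id: String, // e.g. "soroswap", "aqua", "phoenix"
    pub path: Vec<Address>,  // token hops for this slice, e.g. XLM -> USDC
    pub parts: u32,          // relative share: 6/3/1 encodes a 60/30/10 split
}
```

The routing API computes this vector for you; the aggregator then iterates over it, handing each slice to the matching adapter.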
There is a question here from Matias: the distribution in this case will be the 60/30/10 shown in the previous slide, right? Yes; so in the previous slide there was this distribution of 60%, 30%, 10%, and that's correct. For that distribution you say: I want 10% to go through Phoenix, and the path will be XLM directly to USDC. Then you put 30%: I want it to go through Aqua, going from XLM to AQUA to USDC, because you, or your program, think that's the best way to do it. And 60% through Soroswap. Yeah. Great. + +[11:00] Yeah. And finally, in the back end for our API we use the Soroswap indexer, and this project is completely open source. So if you plan to build something that requires information about the trades on Soroswap, if you need to build an indexer, or if you want to improve the indexer that we have, the link is on that slide; it's under github.com/soroswap, the subql indexer, which is the technology that we are using. And of course, if you're more interested, talk to us on Telegram. The readme has the full description of how you create this indexer, and you can build, say, a front end that shows the latest transactions, or + +[12:00] a bot that sends you an alert if there is a price difference, so you can buy a token, something like that. Does the Soroswap indexer rely on Mercury? No, it doesn't. The SubQuery indexer is its own technology. It's in TypeScript, and there's Docker: you bring up three different Docker containers. One is the worker, which is calling the RPC all the time to get the latest block; it ingests into another container that checks whether the event you want is there, and then it ingests the information into a local database. Later you can query it with GraphQL. + +[13:00] So now, DeFindex. I'm not going to see the questions, so if Esteban or Carsten can tell me if there is any question; yeah, cool, let me know. Well, with DeFindex we want to make things easy. So what is DeFindex? DeFindex removes the technical complexity for apps that want to offer savings accounts to their users. You know that if you're a wallet, or an end-user application, you may want to offer some sort of savings account, and at the same time + +[14:00] you have DeFi protocols or real-world assets that can provide you yield to make this savings account feature possible. But this is difficult: it's technical to integrate DeFi, it's difficult to manage risk, it's difficult to monetize on top of that, and it's time consuming. So we created DeFindex to help with that. Our plug-and-play software can make your users earn, all without the crypto complexity. So with DeFindex, if you're a wallet, + +[15:00] you use our plug-and-play software and you can offer yield to your users, and this is done by using DeFi strategies. You can use these diverse strategies to offer multiple options. You have automation on the smart contract side, and you can always look for the best earnings without the need for user signatures for every movement. To understand DeFindex we need to understand two main concepts: strategies and vaults. A strategy is the smart contract that connects to a DeFi protocol; + +[16:00] it is the connection that we have with DeFi protocols, and we can also include there some automation that can benefit the users. And then we have the vaults.
The vaults are the savings accounts. A vault handles all the fee distribution and all the accounting for users: it manages how many users you have, which user has which amount of funds, etc. So let's see an example. This is the Blend strategy that we have right now; we have it for USDC, and for EURC also. We basically lend USDC to Blend, in a Blend pool. We + +[17:00] harvest the BLND rewards, we swap these rewards for more USDC, and we lend again. So we get this auto-compounding effect on the Blend lending and borrowing protocol. Then, in the vault contract, we can include multiple strategies, so you can expose the users to multiple pools. In this case, we have this USDC strategy on the Blend fixed pool and on the Blend YieldBlox pool at the same time. So when a user deposits into the vault contract, the funds are distributed automatically across both pools. Also, if you want, you can have 100% of the money in just one strategy, but + +[18:00] that could change in the future: you can move all the funds of your users to the other strategy that is performing better. And the good thing is that the user doesn't need to know that their funds were moved; you just move them. But also, as a manager of the vault, you cannot take the users' funds out. So this is what we have right now. If someone wants to create a new strategy that could be more interesting, like a leveraged position strategy, you will need to implement the strategy trait. This is how it looks. + +[19:00] You just need to put code inside that reflects the strategy you want to run; with DeFindex it just needs to comply with this interface, and if it complies, you can plug it into a DeFindex vault, and that strategy will be exposed to any wallet that wants to offer that kind of strategy. Can I say a small comment on the strategies? Yeah. So, we want to see more strategies, more ideas that you can come up with; maybe some can be more risky, maybe using other protocols. + +[20:00] As Francisco said, with leverage strategies you can short one asset, or go long, like 9x; if you check with Blend, you can lend, borrow, then swap and borrow again. So you can think about those kinds of things, or even use other protocols. Yeah; that would be a very good idea for a hackathon project that works with both protocols as well. Matias is asking: for the harvest function, what will the data be, what are the options for these bytes? Very good question. Because the strategy trait is general for any kind of protocol, say Soroswap, Blend, + +[21:00] Aqua, FxDAO, we don't know beforehand what parameters the harvest function will need. Probably you will need to submit some instructions on how the trade should be done, whether there is a swap or not. So yes, Matias: it's an optional parameter that you may need for some logic.
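Here is a hedged sketch of what such a strategy trait could look like in Soroban Rust. The method set and names are assumptions pieced together from the discussion (deposit, withdraw, balance, plus the harvest-with-optional-data question above); the published DeFindex trait is the source of truth.

```rust
use soroban_sdk::{contracterror, Address, Bytes, Env};

#[contracterror]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum StrategyError {
    NotInitialized = 1,
    InsufficientBalance = 2,
}

/// Assumed shape of the strategy interface discussed above.
pub trait DeFindexStrategy {
    /// Move `amount` of the underlying asset from `from` into the strategy.
    fn deposit(env: Env, amount: i128, from: Address) -> Result<i128, StrategyError>;
    /// Release `amount` back to `to`, returning what was actually withdrawn.
    fn withdraw(env: Env, amount: i128, to: Address) -> Result<i128, StrategyError>;
    /// Balance the strategy currently holds on behalf of `from`.
    fn balance(env: Env, from: Address) -> Result<i128, StrategyError>;
    /// Claim and reinvest rewards. `data` carries optional strategy-specific
    /// instructions (e.g. a swap path): the open-ended bytes asked about above.
    fn harvest(env: Env, from: Address, data: Option<Bytes>) -> Result<i128, StrategyError>;
}
```

Anything that satisfies the trait can be plugged into a vault, which is what keeps strategies interchangeable.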
Then we have the vault contract roles. If you want to create a vault for a specific use case, for example combining multiple real-world assets, you may need to set these roles in order to create the vault. The manager can do everything except take the users' funds, and can change who holds the rebalancer role and the other roles. The rebalance manager can only rebalance, the fee receiver can only do fee-related things, and the emergency manager can rescue funds for the users. So, in order to create a vault, you can use our front end. Then you need to invest, or rebalance, for the first time, so the vault knows how to distribute the funds automatically. Then you implement it in your app, in your front end, and + +[23:00] start collecting fees. Yeah, I want you to go back to the roles. Yeah, there is a challenge here, if anybody wants to work on it: how to manage co-signing of these messages. Maybe the manager can be a smart contract with different co-signers, you know, or you can use the multisig structure of Stellar; maybe create a front end that helps manage DeFindex vaults. That's maybe something interesting. Some ideas. Yeah, multisig. Thank you, Matias. Then, if you want to do this programmatically, or include it in a front end, you just need to trigger this function on our factory. + +[24:00] Yeah. And then, this is the front end we have right now; it's just for creating vaults. You fill in all the parameters here and you will have your vault. Then, to invest for the first time, you can use the Stellar CLI with these instructions. And then we also have an API. For the API we have an API key for DeFindex as well, right, and it is public, I mean, we check this. So you can go to api.defindex.io + +[25:00] /docs to see the complete documentation on this. You can also go to docs.defindex.io, where you have more extensive documentation. And this is an example of how to use it: if you already have an API client, you can use something like this. For example, if you are doing a front end that uses a vault contract, you can call our endpoints with the deposit function; + +[26:00] you just set the amount and who is making the deposit, and we will return the transaction for you to sign. Then you sign it, and we can even send it for you. So if you want to implement the vault in a front end, this could be very useful. We also have deposit, withdraw, and the APY endpoints, so you don't need to build all those things.
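A hedged sketch of that deposit flow from a Rust client. The route, request fields, and response shape below are illustrative assumptions; check the DeFindex API docs for the real contract.

```rust
use serde_json::{json, Value};

/// Ask the API to build an unsigned deposit transaction for a vault.
/// Returns the transaction envelope (XDR) for the caller to sign and submit.
fn build_deposit_tx(vault: &str, from: &str, amount: u64) -> Result<String, reqwest::Error> {
    let client = reqwest::blocking::Client::new();
    let resp: Value = client
        .post(format!("https://api.defindex.io/vault/{vault}/deposit")) // assumed route
        .bearer_auth("YOUR_API_KEY") // the hackathon API key
        .json(&json!({ "from": from, "amounts": [amount] })) // assumed fields
        .send()?
        .error_for_status()?
        .json()?;
    Ok(resp["xdr"].as_str().unwrap_or_default().to_string()) // assumed response field
}
```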
Yeah. And we are here to help you, so please join the Telegram group. We are going to be there, answering as fast as we can, and yeah, we are here to help you. + +[27:00] Great. Thank you for the introduction to DeFindex and to Soroswap. I think it's great to see that some of the other teams you're with at DraperU are already starting to look into how they can integrate your services, and I think many more projects in the ecosystem could benefit from integrating with you guys. Francisco, you are sharing your screen. I can remove it. Yeah, sure. Okay. I don't know if anybody has comments or questions. I think there was a question from Matias that was not answered. Yeah; he's asking about the Soroswap aggregator. He says you don't need to specify the distribution, + +[28:00] so you don't have the hassle of doing the paths. That's not completely right, because the aggregator needs the specific path as an input; the contract cannot calculate the optimal path itself. However, the API is the one that will help you with that, and you can try our API on the public network. So if you want to start testing in production, you can do it. And yeah, we calculate the best path for you, but then the aggregator contract needs that information. Okay, great. Let's change tracks to the hackathon. So right now we have + +[29:00] Stellar Hacks. I'll just share a link for the hackathon. This hackathon is actually about building on top of DeFindex and Soroswap. So maybe one of you can talk a little bit about it. I can. Are you sharing your screen? Yeah. Share this one; no, it's not this one. Okay, but show the Stellar Hacks web page. Yeah, here we go. So, this is the second hackathon we're doing with Stellar Hacks. The first one was about integrating Blend; this one is focused on + +[30:00] building on DeFindex and Soroswap. So yeah, tell me more: what would you like to see? What are people building? So, for example, here in DeFi integrations (my screen is showing, right? Yeah): here we have smart wallets. A smart wallet using Soroswap and/or DeFindex would be great; you can use our APIs, and the Passkey Kit, and build something with this. A staking contract for Soroswap LP tokens, that would be great too. + +[31:00] A rebalancer bot, this is great. I don't know if you remember, but we have this rebalance manager role. So you can create a vault where the rebalance manager is a bot, and the bot can manage the portfolio for us. MCP servers that, I don't know, I can tell: hey, I want to create a vault and rebalance it; doing something like that could be very great. Then, DeFindex strategies. For most of these, not the + +[32:00] LP staking, but the smart wallet, the rebalancer bot, and the MCP server, you will probably need to use the APIs that we are providing. But if you want to create, for example, a strategy, you can start from the DeFindex protocol and modify the contracts. For example, here is the main repo: you can go to the apps, contracts, and strategies folders and start coding there for more DeFindex strategies, using all the already-existing scaffolding, so you don't need to set up anything. You wanted to say something?
Or maybe + +[35:00] a strategy can need an LP token to have an exposure 50/50 I don't know to different assets. Yeah. Great. So, there's still one week left of the hackathon and. If you want to. If you haven't already started building and you think this could be a great fun challenge to do and a great way to learn more about source swap and defendex. Then this hackathon is a great opportunity. Go to the link, that I shared in the comments, the door hacks link, you can read more about the hackathon the submission requirements, the deadline and what to do and these great examples of what the pot team think could be fun to see and would love to see being built. So. So yeah, go check it out. It's it's a great fun + +[36:00] hackathon and as I said there's one week left. So plenty of time to do something fun. Fun. It's up to the 1st of August. But yes until the 1 of August. Yeah. And and final comment we will be helping. If you have any question please ask them directly. Ask the question directly. Because some maybe we don't have the best documentation. I don't know we are trying to do our best together with a lot of things we are trying to do. So please be patient. If we don't. have. If you think you don't understand please ask. Because probably maybe we don't have, that answer in the documentation and it's very good way to help us to improve our documentation. We built these APIs. So you don't need to inter interact directly with contracts. But you don't need to do it. If you really + +[37:00] had want to hack hard, you know. So, yeah, thank you Karen for the opportunity and Raph and Boxy and all the team organizing this hackathon and Jerome. Jerome. Yeah, they're really doing a great job with these mini hackathons. So, big shout out to them as well. I see Raph shared a link to the Telegram group for the hackathon. So yeah, ask away. If you have any questions. It's there's still time to build something cool. So. So please visit the hackathon page. Okay, great. Any any other questions before we end this call? Doesn't look like there's anymore. But thank you for joining. I know it's a crazy busy week for you. Being at Draperu is a lot of fun. But it's also a lot of work. We we were pitching the Findex to Tim + +[38:00] Draper on Tuesday. Yeah, Tim Draper is a VC legend here in Silicon Valley. He was one of the first investors in many cool companies like Tesla, SpaceX, and ton more companies. So being a being having the opportunity to pitch Tim Driver is definitely an awesome opportunity. Opportunity. Yeah. Yeah. Because we think, that the millions of users will have their first Savings account through a wallet, not a traditional bank. Yeah. Great. Okay. Thank you everyone and you too guys. We'll be back here on Twitter next week. Thank you everyone for joining. Thank you. Bye bye. Bye. Good idea, Matias. + +
diff --git a/meetings/2025-08-07.mdx b/meetings/2025-08-07.mdx new file mode 100644 index 0000000000..7f0fb4f580 --- /dev/null +++ b/meetings/2025-08-07.mdx @@ -0,0 +1,119 @@ +--- +title: "Flashback Developer Spotlight" +description: "A conversation with Flashback founder Brieuc Berruet on Flashback’s live, developer-focused multi-cloud storage platform, plus a demo of connecting and managing buckets across providers and how smart contracts on Stellar fit into the upcoming DePIN marketplace." +authors: [brieuc-berruet, carsten-jacobsen] +tags: [spotlight] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +In this spotlight, Flashback founder Brieuc Berruet shares an update on Flashback and walks through a live demo of the platform now that it’s publicly available. The discussion focuses on the developer problem Flashback is tackling: making multi-cloud storage cheaper, simpler, and more flexible without locking teams into a single vendor. + +Brieuc, who participated in the DraperU x Stellar incubator program last year, explains how Flashback aims to bridge familiar cloud workflows (S3-style object storage and buckets) with a broader mix of providers, including decentralized infrastructure. The demo highlights the current alpha capabilities, the developer documentation and API approach, and how Stellar smart contracts will be used to power a future DePIN marketplace with verifiable service metrics. + +### Key Topics + +- Flashback’s core problem: multi-cloud complexity, rising cloud costs, and vendor lock-in +- Positioning: a unified interface to diversify storage across centralized and decentralized providers +- Current product focus on object storage with a developer-first workflow +- Platform walkthrough: + - Dashboard overview, docs, and API reference + - Connecting buckets from common providers and managing them in one place + - Organizing multiple buckets under a repository concept + - Using an S3-compatible endpoint to interact with different backends +- “Bridge node” concept for connecting providers and collecting bucket-level activity/metrics +- Early-stage product notes: + - Alpha behavior and rough edges surfaced during the live demo + - Emphasis on collecting feedback to improve UX, reliability, and integrations + - Roadmap mentions support for more secure/private bridge-node deployment options +- Smart contracts angle: + - Planned DePIN marketplace where providers publish offers and consumers select them + - Using on-chain logic to track agreements and link performance metrics to reputation + - Quality-of-service measurements (e.g., latency/throughput) feeding marketplace trust signals +- AI mention: + - Plans for a chat-style recommendation layer to help configure multi-cloud choices + - Cost and usage optimization suggestions based on observed storage patterns +- Discussion on decentralized storage tradeoffs (cost, speed, retrieval, developer usability) + +### Resources + +- [Flashback Website](https://www.flashback.tech) + +
+ Video Transcript + +[00:00] Hi everyone, and welcome to this week's Stellar Developer Meeting. This week at Stellar we just concluded the latest cohort in the incubator collaboration we have with DraperU, and since we just wrapped this up and 20 teams graduated from the incubator program, I thought it could be interesting to invite someone who went through the program last year. So today I have the founder of Flashback in the studio. So yeah, please introduce yourself and let us know what you're working on. Hello guys. So my name is Brieuc, co-founder of Flashback. We were part of the first, initial batch of the DraperU x Stellar incubator program, which was, I + +[01:00] think, between November 2024 and February 2025. And yeah, just a bit of background about myself. I have a PhD in computer science, mainly in machine learning and artificial intelligence; I did that for seven, eight years. I'm passionate about AI, about how you can use AI with cloud technologies, and about the fact that today AI is very tied to cloud technologies. I got very curious about what we call decentralized physical infrastructure networks: cloud storage, cloud computing, sovereignty, data protection, etc. And of course, since 2016 I have known the cryptocurrency space, initially with projects like Lisk and Ark, very old projects; I would not say I'm an OG, but close to it. + +[02:00] And yeah, I discovered the blockchain with the same kind of passion, and in March 2023 I decided to start my entrepreneurship journey and build my first solution, which was a decentralized storage network, trying to build something for fast data retrieval. Because if you have already tried, you know, Filecoin or Arweave: they're made for archival data, but once you want to do hot storage and use the data in a very frequent way, they're not very compatible with that kind of usage. So we wanted to step in and try to build something there. But, you know, building the solution over one year and a half, and talking with hundreds and thousands of founders at different conferences (Paris Blockchain Week, EthCC, Token2049), we discovered quickly that, yes, + +[03:00] in fact, Flashback was born from a simple problem: the vendor lock-in problem. Everyone is facing that. But yeah, Carsten, I have a small presentation to help people go through it. Yeah, let's go to the presentation. Okay. Okay, just to be sure, it's a PDF. Okay, it's loading. Here it is. Okay. So, yes, Flashback is an AI-powered cloud diversification solution. + +[04:00] We want to be this unified interface for developers and companies to be able to diversify across centralized and decentralized cloud providers. We want to make it cheaper, simpler, and more flexible for you to diversify your data storage, and in fact make the multi-cloud journey something that you can start at the beginning of your entrepreneurship, when you want to start to build a project. So basically, we have the large vendors, you know, Google Cloud, AWS, Microsoft Azure. They are fast and reliable, we know them well, and they have a lot of stacks. But the thing is that they're very expensive; I think as developers we know the meme: if you want to justify your spend, you just have to say AWS. It's very well known that it's very difficult to understand where all your expenses are going. And at the same time, most companies, when they start to deploy with one provider, deploy so much solution on
So much solution on + +[05:00] the top, that they have this fun looking issue, which is the major issue today. Because it doesn't allow you to be great easily to another provider. So you're stuck with the bunder and the other side we have the decentralized solutions more affordable more privacy rel compliant and low cost. But think, that fon for virus. They're using the blockchain technology and they're using a lot of protocols and decentralized consensus, which makes the system more slower. And so at the same times very difficult for developers to know the tax and or to build efficiently with this solution. So yeah for a company very quickly at the beginning of the journey you can spend hundred hours even more spend a lot of money to deploy with one or two or three different providers to build your own infrastructure it can be up to 25% of your revenue you're going to + +[06:00] inject in fact to develop this at the same time DP technologies very interesting technologies. But it's only 0.1% of the market size. So I would say, that it's still a niche, that can be explored by people. But people do not have the time to explore them. So it is why we have decide to build flashback. So with flashback you can do a cloud divers it's cloud diversification platform. So in fact you're going to have three different advantages. The first one you will be able to reduce your cost. Because by deploying on flashback what you can do is indeed play with centralized providers quite expensive. But sometimes play with decentralized providers you can have this easy access to deep technologies. We want to make it very easy like you connect you deploy your bucket. with this providers, which are some specific services on the top to deploy buckets. And. Then of course we want to make the + +[07:00] mutual deployment seamless not only for large companies. But at some time for startups for five coders anyone, that want to indeed start at the beginning of the development of their technology to have a multi cloud approach. And so to not to get right the bundle looking issue. Issue. I think it's mainly for the main presentation. But what I can do is perhaps. If it's okay to make like a little live demo of the platform. Sure, let's do, that. Okay, let's go. deos is always fun. Live demo is always fun, you know. Yeah, it's let's see what's u Okay, I can share the screen and just get used to the system. just to be sure, that I will share my work. Can I share my work screen? Okay, let's go like this. It would be easier. Because we're going to jump between my spider and + +[08:00] Okay, great. So, So, this is the platform. Very simple. So with the platform of course you can read about the platform. So it's platform.flashback.tech and of course you can learn more you have a documentation I just open here the documentation you can go through the documentation a lot of information about indeed what we are building why we're building this you have introduction storage want to make it as transparent as informative as possible for people and of course you have some guides to help you to create your buckets repository configure with the system uploaded on the files. So today we are focusing on the optic storage, which is the first layer. I will say, that any kind of cloud solution. But indeed in the future want to have + +[09:00] more features according to what developers can bring us in term of feedback and information what they want to have in this platform. So feel free as well to give any kind of feedback. 
You have a lot of API references, if you want to go more in depth and interact with our Flashback API directly. We are trying to make this as flexible as possible for developers, so you can play with the platform in the best way. So, of course, you can sign up with your Google or GitHub account, or you can create an account with email and password, that's okay, and of course log in. In this case I'm going to log in with my Google account. Just a moment, and here, + +[10:00] of course, you have the platform. With the platform we try to make things very simple, so you understand what's going on, at the same time, at the multi-cloud level. The idea is to abstract away the fact that you're going to work with different cloud providers, while providing you all the information to understand how you can interact with those different cloud providers. So you have an overview page, where you have some links to the documentation you need to start your journey with the platform, some statistics, transfers, and bucket activity. The bucket activity is part of the cloud storage, where we have repositories and buckets. So let's see the buckets. I won't go deep into what a bucket is; I think people who are already using AWS or GCP or some S3-compatible solution + +[11:00] understand what a bucket is. So here, in fact, we are creating kind of Flashback buckets, to connect your buckets from AWS or GCP to the Flashback system. Very briefly, I don't want to go too far, but for instance, if you are using AWS, you can have different buckets. So here you have three or four different buckets, and you can find this bucket here. So, to connect: let's see if we can just cancel this one, for instance. Deleted, sure, if it's going to work. Ah, okay. Thank you. So, to connect a bucket, you have different ways: you can connect your bucket with GCP, AWS, Azure. Azure is a bit different; every provider has its own concept of how you can store, how you can do object + +[12:00] storage with the cloud. Today we tested these five providers. You can find Filecoin; in fact, through Filecoin we're using Akave, which is an S3-compatible provider on top of the Filecoin system. But feel free as well, if you subscribe to the platform and want to try it, to test other S3-compatible providers; you can try, I know there is Pinata and things like this. So feel free to get your API key and so on, and set up your system.
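Because these backends stay S3-compatible, a standard S3 client pointed at a custom endpoint is enough to talk to them. A hedged sketch using the AWS Rust SDK; the endpoint URL is a placeholder, not a documented Flashback address:

```rust
use aws_config::BehaviorVersion;
use aws_sdk_s3::{config::Builder, Client};

#[tokio::main]
async fn main() -> Result<(), aws_sdk_s3::Error> {
    let base = aws_config::load_defaults(BehaviorVersion::latest()).await;
    let conf = Builder::from(&base)
        .endpoint_url("https://s3.example-endpoint.flashback.tech") // placeholder URL
        .force_path_style(true) // many S3-compatible services require path-style
        .build();
    let client = Client::from_conf(conf);
    // List buckets exactly as you would against AWS itself.
    let resp = client.list_buckets().send().await?;
    println!("{:#?}", resp);
    Ok(())
}
```

Swapping providers then comes down to changing the endpoint and the credentials, which is the lock-in escape hatch the demo is driving at.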
You need, let's say, the bucket name, so let's call it my AWS bucket, plus some other details. Now, if I show you my secret and access key... I'm not sure, am I sharing my work screen, right? So yeah, can you pause it for a moment? Oh yeah, just like this. Perfect. Thank you, Carsten. Thank you for my privacy. So, let me find my keys and information. By the way, there is a delegation system as well, where instead of

[15:00] setting up the API keys and the secrets, which we would have to store and split across different servers, you can use a delegation system, which allows you to not share these kinds of secrets with us and to keep better security for you and your privacy. Okay, just a minute. I'm just going to copy-paste. Just one second; of course, my computer is slow. Okay. And this one. Here we are. Okay, you can share my screen again, I think, Carsten. You see my screen normally? Yeah.

[16:00] Okay. So we set the key and secret key, and this way I can create my bucket. And... too many requests. What did I do? Okay, no worries; it's part of the test. Instant navigation. So what we're doing here assumes that you created your account. You can do that with GCP as well; for GCP you have all the information to provide too. I have a sound on my side, Carsten, I don't know if you have the same; no? Okay, I have a strange sound in my ear, anyway. So depending on the provider, there is different information to provide; Google Cloud Storage asks for different information. The best is to refer to the guides, for instance, to get the information

[17:00] needed to create and connect the bucket. So there is some information to provide to connect your bucket with the solution. Once you provide it and create your bucket, you have this status. In fact, we're using a technology called the bridge node; you can find information about it in the documentation, where we explain what the bridge node is and its purpose. We have some regions we're covering. The bridge node is mainly a system that allows us to interact with the different providers; it's our API technology, and it's where we can collect different metrics from your different buckets. And of course, once you create your buckets, you can create what we call a repo. I created different buckets this morning, and of course you have this repo. When you want to

[18:00] create a repo, you can call it any name you want, like "repo65", and you can add one or two different buckets, like the GCP one, and create. Okay, I got disconnected again, I think. So anyway, I'll continue. Yeah, it just went live. This just went live, and you already told me that for anyone who uses it, you would appreciate feedback, because it is at the very early stage of being live. So if any of you try it out, I know feedback is very appreciated. These things happen. Exactly. We have a button to give feedback, so feel free to give any kind of feedback. Indeed, we started to develop this

[19:00] just straight after that; all this concept since February. And then we want to add this Stellar DePIN concept we have, but we can come back to it later.
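As context for the upload demo that follows, here is a minimal sketch of what writing an object through a custom S3-compatible endpoint looks like, using Rust with the `aws-sdk-s3` and `aws-config` crates. The endpoint URL, bucket name, and object contents below are placeholders, not Flashback's actual values:

```rust
use aws_sdk_s3::{config::BehaviorVersion, primitives::ByteStream, Client};

#[tokio::main]
async fn main() -> Result<(), aws_sdk_s3::Error> {
    // Point the standard S3 client at a custom S3-compatible endpoint
    // (placeholder URL; in the demo this role is played by the bridge node).
    let config = aws_config::defaults(BehaviorVersion::latest())
        .endpoint_url("https://bridge-node.example.com")
        .load()
        .await;
    let client = Client::new(&config);

    // Upload one object. Swapping only the bucket name is enough to target
    // a bucket backed by a different provider behind the same endpoint.
    client
        .put_object()
        .bucket("my-demo-bucket") // placeholder bucket label
        .key("genesis.txt")
        .body(ByteStream::from_static(b"genesis file contents"))
        .send()
        .await?;
    Ok(())
}
```

Credentials resolve through the standard AWS provider chain (e.g. environment variables), which is presumably where a repo's access key and secret key would go.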
Once you have a repo, and the repo allows you to group your different buckets, so you have the GCP and AWS buckets, you can create API keys. So here I create an API key, which allows me, and I will try to make a very simple demo if it's working, to test the storage. For instance, if I want to store something: I have a repo, and I provide an access key and secret key. I did the client in JS and so on, and for this little example we are going to transfer this genesis file. I

[20:00] took the text of the genesis file, and now we're going to create the file and copy it. So let's go. Here we are. Right now we're storing, through an S3 endpoint, to the S3 bucket. So we can see that we stored something. We can always check that it's working: if we look at the second AWS bucket, we should probably see it; we have a lot of test files, which are here. But of course what is very fun is to see if, in our Google bucket, from an S3 endpoint, we can get something there. So we're going to copy-paste the bucket name, and what is great is that GCS and AWS use some of the same wording. So let me go to the platform. You can find the bucket

[21:00] names directly in the repo details. So this one is the GCP one. I'm going to copy-paste, and here we are. Good. And we're going to send this same copy to GCP. So we have an S3 endpoint and we're sending to GCP, thanks to this bridge node, which is this address. Okay. So we can check it; don't log out, please. Okay. And here we are. So of course it's an alpha version; right now I just showed you storing, with an S3 endpoint, to

[22:00] GCS and AWS. You can try with Azure through the tutorials, and you can provide feedback on whether it's working or not. It's truly an alpha version. We're going to do a more stable version in September, with a better UI and some improvements such as private bridge nodes, because today the bridge nodes are hosted by Flashback, but the idea is to let you deploy these nodes in your own tenants and manage your strategy. My screen should work like this. Okay, perfect. So that's globally all of the presentation, with this little live demo and all the bugs we got. But

[23:00] yeah, of course it's still an alpha version, to be improved into a beta version for Flashback, and our roadmap is very simple: we want to stabilize all of this. These are always complex features; if it were simple, you would have this everywhere. So we decided to take this challenge, and thanks to Stellar we have the chance to push it with Stellar. We are in the Stellar Community Fund, for those who are interested. There is in fact a second product and feature called Flashback DePIN, where, as you can see, with this platform you interact at the service level of the cloud providers. So it's still the services, but if you want to interact with, I would say, data centers, or in a higher privacy model, Flashback DePIN will allow you to interact

[24:00] with the data centers: a data center is going to take its hardware and software, connect its bridge nodes, and then connect to the DePIN marketplace. And so Stellar is going to play a big key role there,
because we're going to deploy the smart contracts on Stellar to manage the marketplace, and at the same time to collect, thanks to our bridge nodes and our smart contracts, all the quality-of-service agreements and all the metrics related to that. So in the marketplace, as a consumer, you're going to connect to the Flashback DePIN platform, you're going to find different offers from different providers, and you're going to select the ones you want. Then these providers are going to give you, thanks to the bridge node, which is S3-compatible (we believe they're going to use systems like MinIO), an API key and so on, so you can

[25:00] already interact in S3 in a very easy way. And of course, once you decide and you have the right agreement with the storage providers, we deploy all of this in the smart contract, and then we operate: the bridge is going to say, okay, this is the quality of service of the storage with the consumer, I mean upload and download speed, latency, things like that, and according to the quality they agreed on, increase or decrease the reputation score of the contract and of the providers. And then it's where you start to have a bit of tokenomics and things like that. So that's what we want to achieve with Stellar, and normally we should have the first tranche delivered within the next weeks. That's our next goal right now. That's super exciting, and I think it's really interesting; it's an interesting use case for smart contracts. Just before we went on

[26:00] the call here, we were talking about how there's a lot of focus on DeFi applications on chain, and on working with tokenization and real-world assets. But this, to me, is a very untraditional blockchain project. You have taken some of the elements and applied them in a project, or a product, that you don't usually associate with blockchain, and I think that's super interesting. I'd love to see more of that. It's interesting to see these kinds of use cases where you think: huh, that was not obvious, to use blockchain in that way, but it works really well. So it broadens the scope of what you can do and how you perceive blockchain. So yeah, super interesting. Yeah, thank you. Indeed, I would say that for the ecosystem, if people are very curious about this kind of technology, it's the DePIN technology, finally:

[27:00] the decentralized physical infrastructure networks. Some people are deploying their own blockchain and their own protocol for this. For us, we prefer to use the existing blockchain and to build with smart contracts. What is great with Stellar is, in fact, the flexibility. I mean, we develop first in Rust; we have a preference for Rust, sorry Golang. The Stellar dev kit is very nice, and the thing is that with all the programs you're doing, like with the universities, it's great, because you can discover the ecosystem and truly develop something real. Great. I don't know if there are any questions from anyone watching, but otherwise I'll say this was super great, to see this demo. I remember talking to you about this about a year ago, and I

[28:00] thought: this is an interesting use case, I would love to see that in real life, and here we are.
You launched the first version, and I've been playing around with it, and I think this is super interesting. For everyone who plays around with it: if you find anything, I know that you're very appreciative of feedback. It is early days for the platform, so yeah, please forward any feedback you may have. I think we have a question here from Matias: it also raises awareness among other non-blockchain-oriented corps and orgs to start thinking of integrations to make things more efficient. Yeah, I think this is a good example of that. Exactly. And I think blockchain, AI, cloud storage, it's all very demanding; I think that's tough for the blockchain. Maybe that's why, comparatively, it's not so developed,

[29:00] because there are still a lot of things to do around blockchain in terms of scalability, to make this kind of use case more and more applicable with the blockchain. Yeah, we see a lot of focus on AI, not just on tooling, but also on implementing both blockchain and AI in the same application where it makes sense. How are you using AI? I see on your website you do mention AI, so in what way are you using AI in your application? Yeah. So it's true that if people go through our website, we do not mention so much about decentralized technologies and things like that, because for us it's like a motor: people see the design of the car and not specifically the motor inside. As for AI, we have different steps planned. The first step could be a chat-based recommendation system, because basically most

[30:00] developers, vibe coders, even people with expertise, sometimes need to see how they can leverage the platform in the best way. So we're going to guide people, with a chat, to understand what they are looking for in terms of multi-cloud configuration, whether they're looking for a specific configuration, whether they have some credits, or whether we can help them reduce costs and take all the benefits of this wide ecosystem, which is extremely fragmented today. So that's going to be the first version of the AI. And indeed, AI needs data, and the AI can't do a lot if you don't have a lot of people using the platform. That's why I'm pushing people to try and use the platform, because this AI is going to learn more about your patterns and about all you can do with the

[31:00] platform, and then improve its recommendations in real time, saying: oh, you are using this kind of storage, you're doing everything in hot storage with AWS, while in fact we can see that 50% of your data is not used in a frequent way, so you are paying a lot; perhaps you can just migrate part of it to Storj or Filecoin and reduce the cost. So that's where the AI is going to intervene in your journey with the platform. As for what we want to achieve with AI for decentralization: decentralizing AI is always complex. What we are looking at right now is perhaps the privacy level, perhaps providing certain recommendations in a private way. But right now, decentralized AI is still kind of a baby in the crypto space. Yeah. I noticed something you said: that you're not really advertising, you don't really write that you use blockchain,

[32:00] and I think that's actually interesting.
Because I see blockchain as a technology just like many other technologies. If you have a really nice implementation, one where users don't have to jump through a lot of the hoops that we usually see in blockchain applications, then it's not really relevant to the users how you choose to solve the problems; you just use a technology, and in this case you use blockchain. I'd love to see it become more normalized, that you don't need to put up a big banner saying you're using blockchain; just more normalized use of blockchain in different kinds of applications. So I think that's interesting. Yeah, we have one more question from Matias: he asked what you think about Arweave.

[33:00] A good question. Arweave is extremely nice in terms of permanence; of course, it's made for this, I mean it has been designed for this. I know they're doing more and more development on top of the protocol right now, to diversify with other storage types. Perhaps Arweave is a bit expensive from my point of view, specifically if you want to store NFTs, you know, very small portions of data; the cost per terabyte makes it a very specific application, at least for the core protocol. I know they're working on different angles and trying to simplify, or to do what the cloud is doing, finally. So I think the next evolution they're going to do can be good. I think

[34:00] they got the same issue that Filecoin got in the past, because Filecoin built this protocol with a very interesting consensus, by the way; I mean, the math behind it is beautiful. They did something very nice with zero-knowledge proofs, but it's too slow, and I don't know if all people know about that, but when you store your data with Filecoin, you have to unlock a portion of gigabytes, you have to seal it, and then unseal it. So this mechanism proves that somebody is hosting your data, but at the same time it's very slow to retrieve the data. So they had to work on a lot of systems, and, you know, accept certain centralized processes to make it faster. But anyway, it's part of the blockchain journey. I believe that the more

[35:00] the blockchain evolves, the better it will be in terms of decentralization for storage, computing, and more. Great. Well, thank you so much for joining today. For me it's been fun to be a tiny part of this journey; I've followed you on the side since we met the first time last year, and now I see that you launched the product, and yeah, it's great to see. So everyone, please go check it out, and thank you again for joining this week. Thank you very much, guys. Thank you everyone for joining. We'll be back again next week.
diff --git a/meetings/2025-09-25.mdx b/meetings/2025-09-25.mdx new file mode 100644 index 0000000000..255ef7510b --- /dev/null +++ b/meetings/2025-09-25.mdx @@ -0,0 +1,164 @@ +--- +title: "Protocol Discussion" +description: "Review of CAP-66 and CAP-67 updates plus a walkthrough of CAP-71, CAP-72, and CAP-73 proposals focused on delegated Soroban authentication, contract-based signers for classic accounts, and programmatic trustline/account creation from smart contracts, including performance and fee/resource considerations." +authors: + - carsten-jacobsen + - dmytro-kozhevin + - emir-ayral + - siddharth-suresh +tags: + - developer + - CAP-66 + - CAP-67 + - CAP-71 + - CAP-72 + - CAP-73 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This protocol session evaluates proposed updates around transaction performance, fees, and resource limits, alongside a deeper technical walkthrough of several CAPs aimed at improving Soroban authentication and expanding what smart contracts can do with classic accounts. The conversation centers on making advanced authorization patterns easier to simulate, cheaper to encode on-chain, and more consistent across account types. + +A key theme is “delegated auth” and contract-driven account behavior: enabling custom accounts (and eventually classic accounts) to rely on contract logic for verification, while keeping authorization context intact and avoiding transaction bloat. The group also covers closing long-standing gaps like creating trustlines (and funding new accounts via transfers) directly from contracts under clear authorization semantics. + +### Key Topics + +- Evaluation thread covering changes to `CAP-0066` and `CAP-0067`, including benchmarks and next-step planning +- `CAP-71`: protocol-level delegated authentication between custom accounts + - Motivation: current delegation via calling `require_auth` inside `__check_auth` is awkward for simulation and can duplicate auth entries + - Proposal adds a delegated credentials structure to reduce duplication and keep auth context consistent + - New host functions to support delegation flows, including a contract-side mechanism to inherit the same signature payload/context when delegating + - Notes on needing a new signature payload variant so delegated signers explicitly sign for the delegating address +- `CAP-72`: contract signers for classic (G...) 
accounts + - Adds a new signer type stored alongside existing signers, with weights like standard signers + - Delegated/contract signers usable in Soroban contract auth flows (not for signing classic transactions yet) + - Introduces an implicit “G account contract” interface callable from Soroban to manage signer weights and thresholds + - Account-management operations from Soroban require high threshold (aligning with classic account management semantics) + - Discussion of base reserve implications when adding/removing signers (subentries affect spendable XLM), and why sponsorship isn’t part of the initial approach +- `CAP-73`: allow contracts to create/modify trustlines and handle missing balances + - Adds `change_trust` and `has_trust` style functionality for asset contracts to manage trustlines programmatically + - Designed to close a feature gap where contracts can’t create trustlines/balances for classic accounts today + - Authorization mirrors classic semantics (trustline owner must authorize changes) + - Notes about typical trustline limits being effectively “infinite,” with discussion about possibly simplifying limits if usage is negligible + - For native XLM: addresses the “non-existent account” case via account creation on transfer with minimum balance requirements +- Ongoing open questions highlighted in discussion: + - Final interface details for managing delegated signer types cleanly (avoiding ambiguity/confusion) + - Tradeoffs between protocol-level support vs. wallet-side simulation complexity + - Where these CAPs may land in protocol scheduling and how they might be phased + +### Resources + +- [CAP-0066](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) +- [CAP-0067](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) +- [CAP-0071](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0071.md) +- [CAP-0072](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md) +- [CAP-0073](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0073.md) + +
Video Transcript

[00:00] Great. Hi everyone. Sorry, Carsten, I thought you would start us off. First, can you hear me well? Please let me know, write in chat. All right. Great. Okay, cool. So, hello everyone. Today I'm going to present a few CAPs, as many as time permits; I have three in total, and all of them were kind of spawned from Leigh's proposal that was linked in one of the CAPs. The idea behind the proposal was about extending our capabilities for the classic Stellar accounts, or G accounts, and the idea behind it was to

[01:00] add a capability for a G account to have a set of contract signers. So, one contract signer, and if you have a contract signer you can really have arbitrary authentication logic, with arbitrary policies and things like that. But in addition to that, you could also add a capability to manage trustlines, for example from Soroban, which closes even more feature gaps, because with this proposal as it is, it won't be possible to use these signers outside of Soroban, since we currently do not provide a way to run contracts outside of the transaction apply phase. It looks like Jay is not hearing me; I don't know. It seems like people still hear me,

[02:00] so that's kind of the high-level motivation behind that. But I'd like to actually start with [CAP-71](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0071.md), which is something I figured out while thinking about how this whole G account customization thing could be implemented, and this CAP is about adding protocol-level support for delegating authentication between custom accounts. Yeah, thanks Marius; it's actually not a new mic, it's just Discord settings, which is funny. So, [CAP-71](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0071.md), and the motivation for it stems from several different issues. Basically, currently what is

[03:00] possible to do in Soroban, as some of you may know, is this: there is a `require_auth` function that requires authentication and authorization from an address, and this function can actually be called from within a custom account's `__check_auth` function, which performs the verification. So this is kind of the current way of delegating. You have an account, but instead of verifying some signature, you say: I trust this other contract to perform the verification for me. This has some benefits for modular custom accounts, and it is like a cornerstone of this proposal for G accounts to have C signers, because it's effectively the same kind of delegation, right? Instead of having a cryptographic signature for a G account, we say: we don't actually know what the signature is; we let a contract perform the

[04:00] verification instead, on its own custom signature format. But this mechanism, while it kind of works, was not built as a full-fledged feature in the protocol. It's just a byproduct of the design: the design allows it, and there's no reason to disallow it, but it's kind of inconvenient to use, and also not optimal to use. The inconvenience factor lies mostly in simulation. If anyone has tried it, they would quickly learn that you cannot simply simulate this; currently, for the delegated contract call, you need to build its own signature payload, which is kind of tricky. This could in theory be fixed on the simulation side, although it might be a bit complicated as well,

[05:00] but yeah, it would still be kind of annoying.
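For reference, here is a minimal sketch of the delegation pattern just described: a custom account whose `__check_auth` delegates verification by calling `require_auth` on a trusted verifier address. The contract, storage key, and error names are illustrative, not from the CAP:

```rust
use soroban_sdk::{
    auth::{Context, CustomAccountInterface},
    contract, contracterror, contractimpl,
    crypto::Hash,
    symbol_short, Address, Env, Vec,
};

#[contracterror]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum AccError {
    NoVerifier = 1,
}

#[contract]
pub struct DelegatingAccount;

#[contractimpl]
impl CustomAccountInterface for DelegatingAccount {
    type Error = AccError;
    type Signature = ();

    // Instead of verifying a cryptographic signature itself, this account
    // trusts another contract: calling `require_auth` here makes the host
    // run the verifier's own auth logic (e.g. its `__check_auth`) instead.
    fn __check_auth(
        env: Env,
        _signature_payload: Hash<32>,
        _signatures: (),
        _auth_contexts: Vec<Context>,
    ) -> Result<(), AccError> {
        let verifier: Address = env
            .storage()
            .instance()
            .get(&symbol_short!("verifier"))
            .ok_or(AccError::NoVerifier)?;
        verifier.require_auth();
        Ok(())
    }
}
```

As the talk notes, this works today, but the client has to construct a separate authorization entry (and signature payload) for the verifier, which is exactly the simulation and duplication pain the CAP targets.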
You would need to do several simulations, each kind of propagating more and more authentication information into the system, and yeah, I think it is still kind of annoying. But in terms of optimality as well, there is quite a bit of duplication here, because you spawn new authorization entries: for example, if you have two accounts and one delegates authentication to the other one, then you end up with two authorization entries in the transaction, which increases transaction size and increases the complexity of building it. And the tipping point for me, for why I came up with the CAP, is the context. Some of the proposals that I've seen

[06:00] for delegation for custom accounts maybe do not use the context much, but if we were to do this for G accounts, we do not want to lose the authorization context, which is basically the list of the contract calls that have been authorized. The issue with the approach of calling `require_auth` from within `__check_auth` is that you could pass the context to the `require_auth` call, but it would not look the same as the normal context that you would have during normal authorization. Which means that if, for example, you have an account implementation that does something with the context, you cannot simply use it as a delegated signer of a G account, because the context will be in a messy nested form: it will be

[07:00] an argument of a call instead of a list of the calls. So it is kind of messy, and I think it would be quite annoying to maintain this mess in the protocol; it creates inconsistency between how different types of contract signers can be used. That is basically the motivation for this, but it also solves the issues I've described before. Oh yeah, and also, if you want to have the context, you will need to attach it to the transaction as well, which is even more duplication on top: besides specifying multiple authorization entries for every address, you would need to specify the whole call stack as well, for every address, but in a different form. It's a lot of mess. So, what do I propose to do about this?

[08:00] It is actually not that big of a change, and it consists of basically two parts. One part is on the XDR side, as can be seen in the CAP. What we do in the XDR is introduce a new type of credentials, credentials being the part of the authorization entry that contains the signatures and things like that. The new credentials are pretty much the same as the Soroban address credentials, but they have an additional field called delegates. Each delegate is just a struct that contains an address and a signature ScVal, which can be an arbitrary value that one can parse, and it also has a list of nested delegates, in case you call delegated

[09:00] accounts from within a delegated account. And the cool thing about this XDR is that you may have a single authorization entry that has as many delegated signers as you need. So there is no duplication at all; there is only the necessary information. In the simplest case, for example, we have an account that asks another contract to perform auth on its behalf, and this will be represented as a single authorization entry that has the main account that we are performing auth for in the credentials at the top level, and the contract that actually performs the auth in the delegates array, where we only store its address and its signature.
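A rough Rust-flavored paraphrase of the shape being described, extending today's `SorobanAddressCredentials` from the `stellar-xdr` crate with a delegates list; the struct and field names here are illustrative, and the CAP's actual XDR definitions are authoritative:

```rust
use stellar_xdr::curr::{ScAddress, ScVal};

// One delegated signer: its address, its signature value, and any signers
// it delegates to in turn (nested delegation).
pub struct Delegate {
    pub address: ScAddress,
    pub signature: ScVal, // arbitrary value parsed by that signer's contract
    pub delegates: Vec<Delegate>,
}

// Address credentials extended with delegates, so one authorization entry
// can carry every delegated signer it needs, with no duplicated entries.
pub struct DelegatedAddressCredentials {
    pub address: ScAddress,
    pub nonce: i64,
    pub signature_expiration_ledger: u32,
    pub signature: ScVal,
    pub delegates: Vec<Delegate>, // the new field vs. plain address credentials
}
```

In the simplest case above, `delegates` would hold a single entry for the verifying contract; per the later discussion, the top-level `signature` can even be omitted when the account fully delegates its auth.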
And this structure is very simple, and if you get a result like this from simulation,

[10:00] it's usually up to you to fill in the credentials anyway. So if you are dealing with delegation as a wallet, what you need to do is just build this new type of credentials. It should be pretty simple: it's only a couple of fields more than you would need to fill in for a normal auth entry, and there is none of this messy propagation of auth data into simulation. It's basically a way to do multisig, but instead of providing multiple signatures you have multiple signer implementations. And there is one more small change, which I will talk about a little bit later. On the host side, we add two functions that actually allow contracts to interact with the delegated

[11:00] signers. The main function is delegated account auth, which is very similar to `require_auth`, but it can only be called from within a custom account, from within the `__check_auth` function, and it doesn't take any arguments. What this does is something similar to `require_auth`, but instead of just authorizing the current call arguments and the current call, it will inherit the context and the signature payload from the current `__check_auth` invocation. So basically, you have a `__check_auth`, and you have another contract that you want to perform the `__check_auth` for you, and that other contract will get exactly the same `__check_auth` arguments, with its own signature.
Which is really cool: it allows all the delegated signers to sign the same payload and have the same context, so there are no messes like the nested context in arguments. For every contract it looks as if they were authorizing the call themselves, while in fact they're doing it on someone else's behalf. The second host function returns the delegates present for the current `__check_auth` call. The motivation behind this function is simply that if you were to use this feature, you would have the issue of somehow attaching the delegated signers to your signature payload: you would need to list some signatures or something like that, and since I would imagine

[13:00] a few contracts would want to use this, it's kind of annoying that everyone would need to reimplement it and come up with their own data structures. Since we already have the delegates in XDR, we just add a getter that returns the delegates attached to the transaction and matched to the current call. So you can get them, make sure that they are actually the signers for the account, and then call delegated account auth for them. There is no need for some bespoke representation of the delegated signers; it's a built-in feature that you may use, and you don't need any extra data structure. And again, this reduces duplication even further, because if you fully delegate your auth to another account, you do not need the signature for your main

[14:00] account. All right, that's it at the high level for the host functions. The CAP has a more detailed description of the exact algorithm and how it works, but I think the simple summary is that it just makes sure the arguments look as if you had been authorizing the `require_auth` call at the top level. And the last small thing, which I've mentioned before: in the XDR, unfortunately, we need a new type of signature payload, which I think is the main downside of this approach. The motivation is that the current payload that you need to

[15:00] sign for Soroban does not contain the address for which you're signing the payload. In the current auth framework that makes sense, because the payload is implicitly tied to the address: the address is signed, but in a different part of the structure. With delegation, though, you need to explicitly sign for the top-level address that delegates everything; basically, without that, this whole thing would be insecure. So that's kind of an annoying divergence between the different credential types. While it is definitely a downside, it's probably not that big, because there is a one-to-one mapping

[16:00] to the credential type, right? If you do not support delegated credentials, you don't need to worry about this, and if you do support them, the criteria for picking the right signature payload are pretty straightforward. Again, the CAP has more context on this issue. Yeah. So I think that is more or less it at the high level. The CAP also has a proposed simulation flow, which is kind of universal for any wallet that wants to use delegated accounts. Sorry, not just G accounts; you can do this for G accounts, but there are custom account implementations around that could also act as delegated accounts. I know OpenZeppelin has been working on something similar,

[17:00] and I know they have experimented with this for a while. So if you like, there's a lot of convenience to be gained for your wallet, while also simplifying and optimizing the transactions. Right. (Question about whether this carries over to the next CAP.) Yeah, we will get to that CAP a bit later, but the answer is probably yes. So, any questions? (Question: does this allow C signers to send G transactions?)

[18:00] That's [CAP-72](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md), and not yet. But it is a step forward in that direction, right? If we ever allow C accounts to sign transactions, then G accounts with C signers will also be able to do that. Again, we can talk about this a bit later; it is definitely a step toward more interoperability between G and C accounts in general.

[19:00] I know that Leigh had some concerns about [CAP-71](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0071.md) specifically, in terms of it being yet another protocol feature, and I do not disagree that it could partially be solved with simulation. But as I've mentioned before, I think doing this at the protocol level is very beneficial, especially if we go ahead with [CAP-72](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md); it not only improves the UX but also simplifies the transactions, which is not something that simulation would allow us to solve. I see there are no further questions; please feel free to chime in on the discussion thread if you come up with something.

[20:00] Yeah, I think let's go to [CAP-72](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md), because I can see that folks are excited about it. So again, 71 is kind of what I think is a prerequisite for 72, just to make it not hacky out of the box.
So [CAP-72](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md) is about the aforementioned contract signers for classic accounts, for G accounts, and the motivation is the gap discussed in the original proposal: basically, to allow you to extend existing G accounts, because G accounts still have

[21:00] quite a bit of adoption, and this allows extending their functionality arbitrarily. It can be new ways to sign for operations; it may be a way to implement new policies and things like that. It is really very open-ended. I think there is a lot of demand for passkeys; it is a pretty hot topic, and an argument can be made that maybe we should support those natively as a signer. That is not part of [CAP-72](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md), but I'd like to gauge interest; maybe another discussion can be started if someone thinks strongly that it has to be a feature on a G account. Now,

[22:00] of course, if in the future we support signing transactions with a C account, this will become a non-issue, because you will be able to do anything with a C signer that you can do today with a normal G account. So, well, I don't want to spend much time on motivation here, but I think it is a pretty significant factor, the ability to modify the account; there are a lot of things you can do with this. So what do we do for this? The first thing is that we add a new signer type, which is stored in the account entry in a similar fashion to any other signer, and it is called the delegated signer,

[23:00] which is basically how it connects to [CAP-71](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0071.md), right? It's also delegation, because unlike the cryptographic signers that exist now, which are just public keys, a delegated signer is a contract that defines its own logic with its own cryptography and so on. Every signer has its own weight, in the same fashion as normal signers have weights. But the caveat, for now at least, is that delegated signers can only be used in the smart contract environment, because, well, they are contracts, and you cannot run contracts outside of the smart contract environment just yet.

[24:00] So, unfortunately, you will not be able to use a C signer to sign the whole transaction. But you can do anything in Soroban, and signing the transaction would work if we ever support signing transactions with a C account as well, basically if we support running contracts at transaction validation time, which I'm not saying is necessarily going to happen, but it's definitely something we are thinking about. Sorry about that. Okay. So besides that: since these signer modifications are Soroban operations, they can only happen from Soroban, and one notable use case we've
And we provide this ability this new type of built-in contract, which I coined as GAC G account contract and unlike terror asset contract stack, which has to be explicitly instantiated GAC is implicitly it implicitly exists for any G account on chain not need to do + +[26:00] anything to interact with it from Soraban you will be able just starting from a new protocol to call contract functions on a G account just really convenient and it is well overhead solution in terms of storage okay so, that's kind of a high level overview. Now go into a bit of detail. So in terms of the standard changes as I've mentioned it's very straightforward. We just extend the existing ser set of signers for the G accounts and we just them in the G account. They're not supported in some more exotic context like in some operations and stuff like, that but, that's a minor detail + +[27:00] and. Then the GE account interface as well specified in the CAP and it is still up to debate the CAP as it is proposes to add an ability to add any type of address as a delegated signer, which might create some ambiguity like G account is a delegated center of a G account. But it's like G account itself is a public key. So we have kind of the same identifier, that may be in two different context and it may not cause some issues and confusion downstream. Downstream. So, it is still up to debate. Like. If you decide to not go with delegated G accounts, the contract interface may be simplified a bit. To + +[28:00] manage signers just based on the address type, G signers would be always cryptographic signers for example and C accounted signers. That would slim down the interface a little bit. But anyways, the current proposal is just to have a pair of functions to modify a weight for a ED 25 signer or remove it. Then the same pair of functions to modify weight for delegated signer or remove it. And. Then two functions, that just manipulate the weights of the account. It's not like strictly necessary. But this seem like a logical thing to have. If you provide the basic account management capabilities anyway. So just allow to modify the master weight and update the thresholds for the + +[29:00] account. Account. So these are just for completeness and not strictly necessary. So for the classic transactions as I mentioned the derated sus will not be supported at the moment. So transactions, that try to use them they will be rejected and there's not even a way to kind of provide a signature for a delegated. But for the smart contracts themselves we extend the building functionality, that performs authentication for the G account and this extension leverages 71 one where we would call the functions behind there, which will enumerate all the delegated signers existing in the current context and we will call this + +[30:00] new Jate account of house function for them and we'll obviously use the XDR format for the payload defined in [CAP-71](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0071.md). So it's directly benefits from it And another caveat is, that current management operations, that in this CAP u are they must have a high signature threshold to match the semantics of the set options operation. So it is something new, that we do. Previously we always used medium threshold for all the Rob operations to all G accounts. So, that's a small change to use high threshold for the account management operation. 
And I guess the only somewhat tricky part about the implementation of the G

[31:00] account contract itself is that we do something new for Soroban here. We now have a way to manage classic entries from Soroban, and not just manage them, but add or remove entries, which hasn't happened before; previously the only thing we did with classic was update the trustline and account balances. Now with this CAP we will also modify the subentry count, which affects the base reserve for the account, and if we remove a signer, we might also need to remove a sponsorship from that signer. This is all perfectly reasonable to do on the Soroban side; it is a bit of work to properly maintain the invariants. But at the high level, I guess the main concern here is

[32:00] that the account has to authorize its modification, and it obviously does that by authorizing the G account contract function call itself. So on any call, as I said, you need to provide a signature that matches the high threshold, and besides authorizing the modification itself, that also authorizes the modification of the account's reserve and subentries. The only difference between this and classic is really that you cannot create new subentries using sponsorship, because there isn't even a way of setting up a sponsorship here. So if you want to manipulate a G account with the GAC, you need to have enough base reserve; you cannot use someone else's sponsorship. Which I think is generally fine, and if

[33:00] someone wants to sponsor a G account, they can do that by, for example, bundling an XLM transfer with the G account modification or something like that. Yeah. So, I mentioned already that every G account contract function requires, and will require, the high signature threshold, and I think that is pretty much it for the high-level overview of the CAP. The main thing, or not the main thing, but at least one thing that I'm still not sure about is the final interface, like what we do with the delegated

[34:00] G account signers. But I think this can be handled offline, especially since Leigh is not here and had some concerns about representing signers and so on. Yeah. So, any questions? Yeah, speaking of string keys, by the way: I do not think you will need a new string key type for this one, I hope at least. You will just have a G account, and it can have a C signer. So that's probably the direction we'll end up in. Any questions?

[35:00] Okay, I don't see any questions for now; it's going pretty fast. Again, I would say, if anyone has any thoughts or comments, please feel free to chime in on the respective CAPs. Matt is typing something, so I'll wait for a few more seconds. Okay, a question from the chat: the CAP has a somewhat non-intuitive effect on the spendable balance of the account. Why is this the

[36:00] case? This is the case because of classic: when we create a new classic entry or subentry, such as an account signer, we need to reserve the base reserve from the account that owns the entry, the base reserve being 0.5 XLM as of today. So it is not a strict withdrawal of a fee, right? We are not taking a fee or anything; it just reduces your spendable XLM balance on the account. We simply cannot bypass this mechanism.
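To make the numbers concrete (using classic Stellar's documented reserve rule: minimum balance = (2 + number of subentries) × base reserve, with the base reserve currently 0.5 XLM), a quick sketch of how adding one signer shifts the spendable balance:

```rust
// Classic Stellar reserve arithmetic: the minimum balance grows by one base
// reserve (0.5 XLM today) per subentry, and signers are subentries.
const BASE_RESERVE_XLM: f64 = 0.5;

fn minimum_balance_xlm(subentries: u32) -> f64 {
    (2 + subentries) as f64 * BASE_RESERVE_XLM
}

fn main() {
    // An account with one trustline and one extra signer (2 subentries):
    let before = minimum_balance_xlm(2); // 2.0 XLM locked
    // Adding a delegated signer via the GAC adds one more subentry:
    let after = minimum_balance_xlm(3); // 2.5 XLM locked
    println!("minimum balance: {before} XLM -> {after} XLM");
}
```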
We are doing the same things that we do on classic with the base reserve, just doing them from Soroban now, and I guess the only non-intuitive part about this is really that it hasn't happened before.

[37:00] But I feel it's not super offensively counterintuitive, because when you authorize an account operation, it is a high-privilege operation anyway, and you acknowledge the consequences; one of those consequences is that the base reserve on the account can be increased, so you can spend less XLM. It is the same as classic; there is no way around this. I looked into an alternative, where the extra signers could be stored on the Soroban side, and then you would need to pay rent for them. That does not remove the fees magically; it just moves the fees in a different direction. And I'm not sure the other approach, where the signers are Soroban resources, is better, because it creates some new interactions in classic that are going to be kind of tricky to

[38:00] implement and maintain, and are generally much more invasive than what is proposed in this CAP. But if anyone is curious, they may look up the history of the CAP, because that was the first version that was merged. All right, I think we have about 20 more minutes, so why not go through [CAP-73](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0073.md) while we are at it? [CAP-73](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0073.md) really supports [CAP-72](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md) on one hand, allowing the accounts to sign for trustline creation, but in general it just closes a feature gap that has been there since the inception of Soroban. I think some people are very well aware of it; it just hasn't come up, I guess, in flows that do not involve custom auth or

[39:00] anything like that. The issue is basically that when you're doing contract operations in Soroban, you cannot create new balances for a G account, a new balance being an account entry itself for the XLM token specifically, and a trustline for any other token. It is not completely blocking the usage of the SAC for G accounts, since you can ask the users to set up the trustline first, but there are contexts where you may want to do this programmatically. There is of course the CAP-72 context: you may want to use your C account logic to manage your trustlines. And it also helps with bundling this into programmatic operations,

[40:00] such as, for example, an example I saw recently from Tyler this week, where you would be able to, say, create a new classic asset, create a distribution account, transfer the asset to the distribution account, and later distribute to the users, all in the same transaction, which is pretty convenient. So I guess there is a convenience factor, there is the factor of really closing the gap, and there is the factor of being more compatible with the C signers of G accounts from [CAP-72](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md). That's the gist of the motivation, and the way it works is via two new functions for the Stellar asset

[41:00] contract. These functions are change trust and has trust, and again, the exact interface is still a bit up for debate, but here is how it stands.
Now, the change trust function operates in the same fashion as the classic operation, whose name I forget, I think it's called change trust in classic as well, and what it does is basically set a trustline limit for a given address. If the limit is zero, then the trustline is removed; if the limit is non-zero and the trustline isn't there, then it is created; and of course, if it is there, it can just be modified. The question is whether we need to manage the limit at all. I did some analysis of the trustlines that have been

[42:00] active during the last year, and 98+% use a trustline limit over 10 to the power of 18, which basically represents infinity. But if anyone is aware of any use cases for low limits, please let us know, because we might get rid of the limit for the sake of simplicity; it is not really obvious that it's necessary, and for programmatic manipulation of trustlines it's really likely that only binary modifications would be needed. If you need a non-infinite trustline limit, you're probably doing something super specific, and it seems kind of weird that a contract would know it wants to do that. It seems really unlikely. And the other function, has trust, just

[43:00] checks whether an address has a trustline. The purpose of this function is really to allow better programmability of trust management, because the change trust operation requires authorization from the address that the trustline belongs to. For example, say you are performing an asset transfer to a new address. Normally you would only need auth from the transfer source, but if you wanted to create the destination trustline, then you would also need auth from the destination; you cannot just unconditionally call change trust to make sure the trustline exists. Well, maybe you could: you could make it so you don't need to require auth if the trustline already exists; how this interface looks is really up

[44:00] to further discussion, but anyway, this is the current proposal. The has trust function really helps to check whether you actually need to call change trust or not, and thus whether you need to require auth from the receiver of the balance or not, which is kind of convenient. And if you call this from a contract, then, similarly to [CAP-72](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0072.md), this modifies the base reserve, and again, in the same vein, we cannot avoid that, because it's classic semantics, and we require authorization on the change trust operation in the same way as classic. So the base reserve modification is just yet another thing that you're authorizing by signing this, and it has the same

[45:00] downside as before, that sponsorships are not available: if you want to create a trustline, you need to have a sufficient XLM balance for it. And for XLM specifically, there are no trustlines, and instead we just create a new account: when an XLM transfer is performed to a G address that does not exist yet, the account will be created and funded with the newly transferred balance. The only caveat is that the minimum transfer amount here is one XLM, because that's the minimum balance that an account may have; if the amount is less than one XLM, the operation will fail with an error, as it does today. So yeah, the XLM part is arguably much simpler.
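A hypothetical sketch of the resulting contract-side flow, only requiring the recipient's authorization when a trustline actually has to be created. The `has_trust`/`change_trust` trait below is illustrative; CAP-73's final names, signatures, and limit handling may differ:

```rust
use soroban_sdk::{contractclient, Address, Env};

// Illustrative extension of the asset-contract interface with the two
// proposed CAP-73 functions; nothing like this exists in today's SAC client.
#[contractclient(name = "TrustlineClient")]
pub trait TrustlineInterface {
    fn has_trust(env: Env, addr: Address) -> bool;
    fn change_trust(env: Env, addr: Address, limit: i128);
}

// Create the trustline (and therefore require the recipient's auth)
// only when it is actually missing.
pub fn ensure_trustline(env: &Env, sac: &Address, recipient: &Address) {
    let client = TrustlineClient::new(env, sac);
    if !client.has_trust(recipient) {
        // Mirrors classic change_trust semantics: the trustline owner
        // authorizes, and a huge limit effectively means "infinite".
        recipient.require_auth();
        client.change_trust(recipient, &i128::MAX);
    }
}
```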
[46:00] It does not require handling any limits, and yeah, it is just very simple in this sense. And I think that's pretty much it; overall, it's a pretty simple CAP. The only tricky part about it is really nailing down the interface, because we could make it much simpler if we went for a binary, either zero or infinity, trust limit, but we need some signal on whether that would be too restrictive or not. Okay, a question: in classic, one of the reasons sponsorships were added was to prevent accounts created by wallets from being merged by a third party for the base reserve. Is it a concern for creating accounts on transfers without sponsorships?

[47:00] I am not sure I understand the question. Creating an account on transfer... why would you... isn't that how it works for classic as well? Like, when you create an account, it's like an XLM transfer. Maybe I could go to this page... I cannot. If you're talking about... oh no,

[48:00] that's unfortunate. Yeah, we can discuss this; I think I am still not sure I understand it. Maybe it is some bit of the classic protocol that I'm not aware of. There is also a question about splitting the CAP. Okay, technically the trustline part and the XLM part are kind of independent; I bundled them into the

[49:00] same CAP because they really serve the same mission of dealing with non-existent G account balances, so I thought it makes sense to close this gap all at once, and not just for a part of the asset balances. But if we figure out that one part is much harder or much easier to do, we could split this up further. Given how simple it is, though, I would hope we could do both atomically. So yeah, that's the CAP. I guess the only other thing to say for now is that we are still not sure which protocol version this will go into, so likely some of the CAPs will come a bit later, and some may come sooner. Stay tuned for that, and please feel free to leave any feedback or suggestions in the CAP

[50:00] discussions. Thanks, Ariel.
diff --git a/meetings/2025-10-02.mdx b/meetings/2025-10-02.mdx new file mode 100644 index 0000000000..5fd92ca9be --- /dev/null +++ b/meetings/2025-10-02.mdx @@ -0,0 +1,136 @@ +--- +title: "BN254 Host Functions and ZK-Friendly Hashing" +description: "Protocol discussion covering CAP-74 (BN254 primitives) and related SDK/cryptography needs, including ongoing feedback on CAP-66 and CAP-67, plus early design exploration for adding Poseidon-style hash primitives for ZK and Merkle-tree use cases." +authors: + - bri-wylde + - dmytro-kozhevin + - jay-geng + - siddharth-suresh + - yan-michalevsky +tags: + - developer + - CAP-66 + - CAP-67 + - CAP-74 +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session reviews several upcoming protocol and developer-experience changes, starting with CAP-74 to add BN254 (alt-bn128) primitives to Soroban for better compatibility with existing proving systems and EVM-style precompiles. The group discusses why “just use BLS12-381” is often impractical for existing ZK stacks and why native support matters for performance. + +The call then shifts into an early design discussion around adding Poseidon-family hashing support for ZK applications (e.g., commitments and Merkle trees), focusing on how to balance interoperability, configurability, protocol complexity, and metering. Participants compare approaches ranging from high-level hash host functions to lower-level building blocks and guest-side implementations. + +### Key Topics + +- General review of upcoming protocol work and draft feedback (including `CAP-0066` and `CAP-0067`) +- `CAP-74`: BN254 host functions + - Adds `G1Add`, `G1Mul`, and `pairing_check` for BN254 parity with EVM-style BN254 precompiles + - Rationale: ecosystem/proving frameworks frequently depend on BN254; swapping curves can be non-trivial + - Notes on performance: BN254 pairings implemented purely in-contract are prohibitively expensive + - Introduces metered cost types for BN operations (similar in spirit to existing BLS cost modeling) +- Poseidon hash support discussion (pre-CAP exploration) + - Motivation: ZK-friendly hashes reduce circuit/prover costs dramatically vs general-purpose hashes + - Need on-chain hashing for state updates (e.g., maintaining/updating Merkle trees consistently with offchain proofs) + - Three implementation directions discussed: + - High-level Poseidon/Poseidon2 hash host functions with selected parameters + - Exposing Poseidon “building blocks” (sponge/permutation primitives) and implementing full hashes in SDKs + - Guest-side implementations using existing field arithmetic host functions (measured as too costly today) + - Tradeoffs highlighted: + - Interoperability risk if parameters (round constants/matrices) differ across ecosystems + - Protocol maintenance burden and “combinatorial” cost-type/function permutations vs fewer primitive building blocks + - Metering concerns and performance targets for Merkle-tree workloads (many hashes per update path) + - Practical suggestions raised: + - Prefer a configurable approach (override constants/parameters) if metering can remain sane + - Align defaults with widely used proving libraries where possible to maximize compatibility + - Consider limiting scope initially (e.g., focus on common arities like arity-2 for binary Merkle trees) + +### Resources + +- [CAP-0066: Soroban In-memory Read Resource](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) +- [CAP-0067: Unified Asset 
Events](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) +- [CAP-0074: Host functions for BN254](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0074.md) + +
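For a rough sense of how a contract might consume the three primitives listed above, here is a minimal Rust sketch of a Groth16-style verification flow. Everything in it is hypothetical: the `bn254_g1_add`, `bn254_g1_mul`, and `bn254_pairing_check` stand-ins model the CAP-74 description, but the real names, types, and encodings are defined by the CAP and the Soroban SDK.

```rust
// Sketch only: hypothetical stand-ins for the three BN254 host functions
// described in CAP-74; not the actual Soroban interface.
#[derive(Clone)]
struct G1(Vec<u8>); // serialized BN254 G1 point (placeholder encoding)
struct G2(Vec<u8>); // serialized BN254 G2 point (placeholder encoding)
struct Fr([u8; 32]); // BN254 scalar field element (placeholder encoding)

fn bn254_g1_add(_p: &G1, _q: &G1) -> G1 { unimplemented!("host call") }
fn bn254_g1_mul(_p: &G1, _s: &Fr) -> G1 { unimplemented!("host call") }
fn bn254_pairing_check(_g1s: &[G1], _g2s: &[G2]) -> bool { unimplemented!("host call") }

// Groth16-style public-input accumulation: vk_x = ic[0] + sum_i(input_i * ic[i+1]).
// Without native G1 add/mul, emulating this loop in contract code is what
// becomes prohibitively expensive.
fn accumulate_inputs(ic: &[G1], inputs: &[Fr]) -> G1 {
    let mut acc = ic[0].clone();
    for (input, base) in inputs.iter().zip(&ic[1..]) {
        acc = bn254_g1_add(&acc, &bn254_g1_mul(base, input));
    }
    acc
}

// The final proof check reduces to a single multi-pairing equation, mirroring
// the EVM-style pairing-check precompile the CAP aims for parity with.
fn verify(g1_terms: &[G1], g2_terms: &[G2]) -> bool {
    bn254_pairing_check(g1_terms, g2_terms)
}
```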
+ Video Transcript + +[00:00] All right, I'll get started. So welcome everyone. Today I'll be speaking about [CAP-74](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0074.md), which introduces host functions for the BN254 pairing-friendly curve. As you may know, Stellar already has host function support for the BLS12-381 curve, which was chosen over BN254 due to growing support for it and its security. But we've gotten ecosystem feedback that there are existing use cases that rely on BN254, and adding native support for it will make it easier to implement those use cases on Stellar. The other option would be to adapt those use cases to use the existing BLS host functions, but that would be prohibitively expensive in some cases. So the main part of this proposal is that it adds three host functions — G1Add, G1Mul, and a + +[01:00] pairing check — which gives us parity with the BN254 precompiles in the EVM. Now, there have been some discussions about extending this, in particular adding a host function for G1 multi-scalar multiplication, but this proposal doesn't include that. At the moment we're just trying to support the existing BN254 use cases, and this should be sufficient; for any new use cases, the recommendation is to use BLS. The CAP also specifies new cost types for the BN operations, which are similar to the cost types that we added for BLS — they're just a subset, because BN has a subset of the host functions. It's a pretty short CAP. We also define the field and groups. Are there any questions? + +[02:00] If there aren't any questions — oh, Tomer's typing. Yeah, it's a pretty straightforward CAP. If anyone has any questions, you can tag me on Discord and we can have a discussion. If not, I'll pass it off to Jay to discuss adding host support for Poseidon hash functions. Can you hear me? Yeah, I can hear you, Jay. Okay, wonderful. Great. I just want to add on to what Sid just talked about. So yeah, the ecosystem technically could use all these BLS functions, but in the proving frameworks, including the circuits, switching curves is in reality more + +[03:00] challenging than we thought. It's not just a simple curve swap; sometimes those protocols have evolved and circuits have to be recompiled with a different curve. And then there are also different kinds of issues, like different proving systems supporting one curve versus another. So BN254 is mostly for backward compatibility. We've also added a Soroban example of importing BN254 inside a Soroban contract, which works, but it takes about 500 million instructions to do one pairing, so that is also prohibitive. I just wanted to point out these two specific rationales for why this is needed. Cool. And yeah, I will start — yes, thank you, Yan. So the Poseidon hash + +[04:00] function has also been one of the requested primitives to add into Soroban. I've recently looked into this — I don't have a CAP yet, but I will lay out what I found and the different approaches informally here, and I'll work on the CAP, probably ready in the next couple of days. So first of all, why is this needed in ZK applications? A lot of commitment schemes are hash based, meaning you hash something and then you prove that this thing has been hashed, without revealing it. So the hash function is critical.
And which hash function is used is also important for the performance of + +[05:00] the prover side. From what I understand — I'm not an expert, but from what I understand — using the right hash function, like Poseidon, can reduce the number of arithmetic gates by something like two orders of magnitude. That is a fairly big difference; that's why most circuits use these field-based hash functions, and Poseidon and Poseidon 2 are two of the most widely used ones. So the next question is why we need to support it in the host, given that the proof is generated offchain and the verification of the proof doesn't involve hashing. The hash is needed because some applications need to + +[06:00] maintain, for example, a Merkle tree of different coins, and then to prove a coin's existence they can prove it without hashing on chain. But to update that tree on chain — or any kind of state that involves hashing to be proven — they need to use a hash for the update. So to be consistent, we need to make sure that the hash function being proven is the same hash function being used for the contract state update. So that's the big-picture rationale. Poseidon functions are not one function; it's rather a family of functions — an approach based on sponges, and ways to connect these + +[07:00] sponges to provide permutations over different inputs. So it's fairly generic: you can plug in different parameters. For example, if you require a higher security parameter, then the round parameters will be larger; and depending on how many inputs you need to hash at once, and how much output you need, the number of rounds needs to be adjusted. So in the Poseidon paper there are different instantiations of Poseidon, given the requirements as well as the underlying field. Here, the primary fields we have to support for compatibility are BN254 and also BLS12-381. + +[08:00] Those are the two requirements, and based on these requirements some of these parameters can be decided, but some of them are still choices. So we need to decide what level of flexibility we want to support. In the discussion thread, in the last post, I've laid out the three approaches that I feel we could support, ordered from the most high level to the most low level. Number one: we support it as hash functions directly. For that we would support Poseidon 2 and the two curves. I think this is the most straightforward one, but the + +[09:00] limitation is that we need to decide on the parameters internally. In the Poseidon paper they describe how to choose these parameters; they provide a script to generate them and their own guidelines on how to choose them, and in reality, from what I know, it seems like different applications sometimes generate their own parameters — I'm talking about the internal round constants and these matrices. So if someone from another ecosystem generates a proof with a different instantiation of Poseidon, then potentially we could not support it here. So there's a little bit of compatibility risk we have to weigh. But I think for + +[10:00] this one most parameters are fairly straightforward, and
then we need to make sure that the major provers can be supported, like Circom or Noir — we just need to make sure that whatever parameters we choose are compatible with the major ones that would want to migrate to Stellar. So that's number one. Number two is to expose the internal Poseidon building blocks. As I briefly mentioned earlier, Poseidon hashing is sponge based, like a block cipher: it consists of absorbing the input, permuting it, and then squeezing it out. One of the ecosystem members, Antonio, suggested that we could support + +[11:00] a generic interface providing these, and then users can choose the parameters — the security parameters, the field, and all that — and we can expose these options through the SDK. I like this approach, but I think it involves a bit of figuring out the requirements of the libraries, and then what level of detail we want to expose, and how, in the SDK. I'm not an expert in this part yet, so right now I would say this is a less favorable option. I think one of the main advantages of this one is that Poseidon and Poseidon 2 use a very similar interface, so potentially this one is + +[12:00] cleaner: we just support this, and then the two different variants can be supported more uniformly, without adding complexity. Yeah, though this requires the user to understand what they're doing and how to instantiate the hasher with these options. I'll also talk about number three, because I think exposing the host function is the most straightforward approach — in reality it's just different parameters, and we could support them all — but with the internal building blocks it's possible that we could give + +[13:00] more flexibility. That's the main rationale, but we have to think more carefully about how to do it. I wanted to say that for approach two, and probably three, it's not like we have to put much burden on the users: we can build SDK functions that provide the same hash functions as you have in option one. But there is a huge benefit in not introducing a combinatorial explosion of cost types into the host, which I think is a really bad property of approach one — it seems like instead of just adding a few cost types we'd need a lot of them, and if you wanted to do something more complex we would need even more, and that's a lot of + +[14:00] maintenance burden and a lot of complexity in the protocol, which would be nice to avoid. So I feel like these parameters could be implemented in the SDK, so user complexity would remain the same; the only question is really performance at this point. Yeah, I disagree on the cost type part, because I think doing two and three can reduce the number of cost types, since these are just a few building blocks. And to be clear, I totally agree with you — I'm saying that I do not like option one, because in option one we need a cost type per hash, per curve, and per arity, which is an argument of the function; that's what I refer to as this explosion — we have three + +[15:00] different inputs to that. So I think we're on the same page here. I agree that we probably want a more limited number of cost types that can be useful building blocks. Yeah.
And by the way, also on the arity: I think at a minimum we just need to support arity 2, to support the binary Merkle tree. I'm fairly certain we don't need four right now, and then maybe one. So at a minimum maybe just an arity of two, but I would need to confirm that this is all we need. Yeah, I still feel like it is a really hacky design, right? Like, we have a host function, it has an argument, but it can take only a couple of values. I don't know — I would really try to stay with the building blocks and do the work on the SDK side, if it is feasible + +[16:00] from the instruction count standpoint. Yeah. So number three goes a step further: do this without providing any Poseidon host functions at all, because the algorithm itself is, they say, fairly straightforward — it's mostly applying arithmetic repeatedly in iterations. So in theory most of the heavy cost is in the field arithmetic, which we already provide for BLS12-381, and for BN we are going to provide it. So the question is: can we just do this on the guest side? I did a bit of an experiment — the hash-2 I wrote takes + +[17:00] about 17.6 million instructions. This is trying to do an optimized implementation; with the current implementation I think that's the best we can do, and out of that only around three million is the actual crypto arithmetic. The main reason is that for every operation you have to do a round trip to the host, and then do the bytes-to-internal-representation conversion. The internal type for efficient arithmetic — if I remember correctly it's Montgomery form — requires some arithmetic to convert the numbers into a form that's efficient for multiplication or division, for example. So there's quite a lot of overhead. The thought process is: if we can bundle these + +[18:00] into more efficient field arithmetic functions, such as dot product and matrix multiplication, is it possible that we can reduce that cost? I don't have a straight answer for that, but my intuition is that we can reduce it significantly — but maybe not to the level that we require. For reference, to hash into a Merkle tree of a million entries we need 20 hash operations, so this would require at least a five to ten times reduction of that number. So while we can reduce these operations, it's doubtful that we'll get close to the bound that we're shooting for. A bit of the + +[19:00] reasoning behind that is: even though we can provide the matrix multiplication, these matrices are fairly small, like 3x3. So we're not bundling a thousand operations into one — we are just bundling, you know, nine — and the rest of it is still WASM code and loops, which will still have significant overhead. That's why I feel like this approach, even though it's more attractive, probably wouldn't work well; it would also require the user to maintain their own Poseidon implementation. So overall, in my opinion, number one is probably the best option to support today, and then we need to think more about these parameters and choices. + +[20:00] Any numbers for option one — like how much better is it? Yeah, in number three I added up all the metered costs for arithmetic; they are about 2.5 million. So I would say number one would be much closer to that.
There's still some extra cost from memory allocation and the conversions, a little bit, but I would say it will be much closer to 2.5 million. Yeah, good point. For the numbers in three — when you say optimized, what do + +[21:00] you mean by that? Is that like a native call that you tested at 17 million? No, it's not a native call. It's a WASM contract — basically implementing the Poseidon hash in WASM but calling the host functions for BLS scalar add, multiply, and those functions. Optimized just means that I tried what I could, or what I know, to make sure that these calls are all necessary and that we're not wasting unnecessary conversions and things like that. Okay. And the cost between one and two? Yeah, in terms of CPU cost I don't know how much they would differ. It's more about the complexity and the maintenance. So I + +[22:00] would imagine, if done properly, they will be pretty similar in terms of performance; it's more the question of what level we want to expose. Again, on the complexity — sure, I get the argument; I think user complexity, as I said, can be moved to the SDK. What is important here is protocol complexity, and I feel like one is more protocol complexity, just from the standpoint of maintaining the permutations of different inputs. So if two is really about the same — if you had a number for two, and if you knew that it's not significantly worse than one — then I feel like two is the much better option. + +[23:00] Yeah, I haven't done enough exploration in that direction yet; I'll try that in the update. But even for two, we need at least Poseidon and Poseidon 2, because their sponge implementations are different. So we're talking about maintaining at least six of these functions, essentially. Yeah, but it still feels much better, because it's at least finite and it covers all the cases, right? Yeah. And as Nico points out, if the field isn't hardcoded, then something new needs to be done, and we can do it at the guest layer. Okay, I'll do a little bit more exploration on two, and + +[24:00] I'll post an update on the thread. Yeah, Alex, I think that's the agreement — two is more flexible; it's just a little bit of complexity to do it right. + +[25:00] Yeah, I think in option two, even though we expose these primitives, the internal parameters would still have to be — I think these interfaces don't let you specify the security parameters, like the round constants and the matrices. So + +[26:00] one other thought I had was: should we expose those, or should we let the user initialize the hash with the set of parameters they want — and how relevant would that be for most of the ecosystem? I haven't figured out the answer to that yet. So potentially, doing two with maximum interoperability could be more complex. Would different parameters impact the runtime, or are they just—? No, it's just numbers — not the runtime. It's just a different hash: the output will be different, but the runtime depends only on the width of the inputs and the number of rounds. So I think with + +[27:00] number two we can have that metered per iteration, while the security parameters are just a choice of constants that produce different outputs.
Yeah, I agree. If we add a capability to provide configuration and it does not change host complexity much, then we should do that. From the host point of view I would be concerned about the metering, but since we can meter it all the same, I don't see why we wouldn't pass in an additional vector or something as an input — unless we are introducing too much + +[28:00] complexity here. Okay. Yeah, I think two would be a good solution for interoperability. I'll spend more time on that. I think I saw someone typing. + +[29:00] Hey guys. Hey. Can you hear me? Yes. I'm in a bit of a loud environment, so good that you can hear me. So, I joined in the middle, so maybe I missed some things, but I think I understood Jay's proposal: a hardcoded option versus an option that enables us to configure the MDS and round constants parameters. It seems to me that the configurable option is a good way to go, because it still lets the hash function itself run fast after the initialization, but enables some flexibility. The one thing I think we need to + +[30:00] ensure — and I don't entirely understand how it works — is how often we need to run the Poseidon initialization, which takes, let's say, four million instructions. So I think it would be good to understand whether, say, it's deployed as a separate contract, and whether the initialization only occurs once when deployment is done. Those are details I'm less familiar with. But as long as we can guarantee that the initialization of the hash doesn't happen too often, I don't think configurability is a disadvantage. Actually, Guy was looking into that as well yesterday — he was looking into implementations. From what we saw, Tomer, + +[31:00] when we did it with you, when we had the budgets, it seemed like it's around four million instructions, and then the hashes themselves are around 10 million for everything, right? But as long as, for instance, those four million instructions happen in initialization once, that's not too bad. Sorry, could you repeat that part — what's the four million instructions? That's the construction of the hash. Oh, okay. Yeah. I'm thinking, if it's like a contract — every contract has its own seed for the RNG, for example. Similarly, every + +[32:00] contract can set its own state, and once it's set, that should be the one-time initial cost. I'm also not very familiar with the internal workings of these hashes, so I need to look a bit into it, but I imagine the initialization is just one-time, and then to use it you have to absorb the inputs first, so that part would scale with the number of inputs. Yeah. So the initialization is just reading and checking the constants; it's not stateful or anything like that. As long as that's done, you can hash. Yeah, that's right. I guess it's per— No, it can be per contract lifetime. That's a good question — then you + +[33:00] need to serialize it somehow between invocations. Or, if you're invoking a method from another contract, then I guess it can just stay cached and we can keep invoking that. But I'm talking here about one session — it's not per invocation; it's however long you can keep it. Any questions?
I have a question about what you wrote. Why would it matter whether other ZK projects have different configurations — apart from, unless, you want some interoperability between different chains? + +[34:00] Other than that, we could just pick our own. Sure, makes sense. I mean, there's definitely an advantage to having the same parameters. Yeah, it makes sense. Allowing the constants to be overridden + +[35:00] for different chains and different projects — yeah, makes sense. Good point, thanks. Okay. So it seems like + +[36:00] the main questions and concerns are interoperability and the flexibility of parameters. So I will look into option two in more depth, then I will update that thread, and then the CAP will follow. Are there any other questions? I guess, also to add: it would be good to look at the parameters of circomlib — the standard Circom library — and see what they correspond to in specific projects. Because so far I've replaced them with something that works interoperably between what I could get to work in Circom and Rust, + +[37:00] so that's overriding the standard library. If we can go back to the standard library and use its parameters, that probably simplifies things. Oh, okay. So did you say you changed those parameters in order to—? Yeah, basically on both ends — both the Circom end and the Rust end — I use parameters that I took from a Poseidon implementation I found. But I need to check whether they come from the standard library or from some other library; that's why it's hard — I'd have to double-check that. If we could stick to the default circomlib parameters, that would be good. Yeah, okay. And they probably work for other projects too. Yeah. So perhaps supporting a default set + +[38:00] — if it's not overridden, default to circomlib or something. Cool. If there are no other questions, I guess I'll hand it over to you, Bri. You want to close the meeting? Okay. Wonderful. Thank you all. See you later. + +
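To make "approach two" from the discussion above a bit more concrete, here is a minimal Rust sketch of how an SDK could assemble a rate-2 sponge hash from a permutation building block, assuming the host exposed only the permutation. The `Fp` type and `permute` function are invented placeholders, not a proposed interface, and the arithmetic is dummy.

```rust
// Sketch of "approach two": the host exposes only a fixed-width permutation,
// and the SDK builds the sponge (absorb / permute / squeeze) on top of it.
// `Fp` and `permute` are hypothetical placeholders, not a real API.
#[derive(Clone, Copy, Default)]
struct Fp(u64); // stand-in for a prime-field element (BN254 or BLS12-381 in practice)

impl Fp {
    fn add(self, other: Fp) -> Fp {
        Fp(self.0.wrapping_add(other.0)) // placeholder, not real field arithmetic
    }
}

// Hypothetical host primitive: a width-3 permutation over the field. The real
// permutation would apply the configured rounds of S-boxes and matrix multiplies.
fn permute(state: [Fp; 3]) -> [Fp; 3] {
    state
}

// SDK-side sponge with rate 2 and capacity 1 (t = 3): enough to hash two field
// elements at a time, e.g. the children of a binary Merkle-tree node.
fn sponge_hash2(a: Fp, b: Fp) -> Fp {
    let mut state = [Fp::default(); 3];
    // Absorb: add the inputs into the rate portion of the state.
    state[0] = state[0].add(a);
    state[1] = state[1].add(b);
    // For a two-element input, a single permutation call suffices.
    state = permute(state);
    // Squeeze: read one element of the rate portion as the digest.
    state[0]
}
```

Metering then naturally attaches to the permutation call, which matches the point raised above about charging per iteration rather than defining a cost type per hash variant.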
diff --git a/meetings/2025-10-09.mdx b/meetings/2025-10-09.mdx new file mode 100644 index 0000000000..e096de8d32 --- /dev/null +++ b/meetings/2025-10-09.mdx @@ -0,0 +1,158 @@ +--- +title: "Demo of OpenZeppelin UI Builder" +description: "OpenZeppelin demonstrates UI Builder for quickly generating React-based UIs to interact with Soroban contracts, followed by a protocol discussion covering CAP-66/CAP-67 follow-ups and CAP-75’s approach to Poseidon/Poseidon2 support via permutation host functions for ZK-friendly hashing." +authors: [carsten-jacobsen, jay-geng] +tags: + - spotlight + - CAP-66 + - CAP-67 + - CAP-74 + - CAP-75 +--- + +import YouTube from "@site/src/components/YouTube"; + +## Generating React Interfaces for Soroban Contracts {#part-1} + + + +OpenZeppelin’s segment showcases UI Builder, a tool for rapidly creating usable interfaces for Soroban contracts on testnet or mainnet. The demo walks through selecting a contract, choosing a function, customizing form fields, previewing the UI, executing a transaction, and exporting the generated UI as a React app or reusable component library. + +The session then transitions into a protocol meeting focused on upcoming changes and draft specs, including continued work around transaction fees/resource limits and adding ZK-friendly primitives for developers building Merkle-tree and proof-based applications. + +### Key Topics + +- OpenZeppelin UI Builder demo + - Build contract interaction UIs from a contract ID (supports WASM and Stellar asset contracts) + - Pulls contract state for inspection (demo references `stellar.expert` as a source) + - Form generation workflow: + - Select a contract function (e.g., `set_text`) + - Customize labels, placeholders, required fields, hidden fields, and hard-coded values + - Choose an execution method (wallet connection vs specific account) + - Select a wallet UI kit (OpenZeppelin kit vs Stellar Wallets Kit) + - Live interaction: execute a transaction and refresh to confirm on-chain state updates + - Export options: + - Download a full React app (zip) for immediate use + - Use the UI Builder as a package to compose components in an existing React app + - Local management: + - Saved UIs stored in browser local storage + - Export/import as JSON for sharing or moving between environments + - Mentioned tooling: OpenZeppelin Relayers and monitors for automation, gasless UX, and event-driven reactions + +### Resources + +- [OpenZeppelin UI Builder](https://builder.openzeppelin.com) + +
+ Video Transcript + +[00:00] Hello and welcome everyone to this week's Stellar Developer Meeting. We are finally back again after Meridian and lots of hackathons and activities, and it's great to be back. With us today we have Steve from OpenZeppelin. You've probably heard about them — maybe you've even tried some of their tooling — but they're building some really great tooling that's compatible with Stellar and built for Stellar. So I'm really excited to see a demo of the latest tool they've developed, the UI Builder. So Steve, maybe just introduce yourself really briefly, and then we can jump into the demo. Yeah, thank you so much. I really appreciate it. It's nice to be here with you all today. So again, my name is Steve. I'm doing developer relations with OpenZeppelin, and I handle a lot of things — anywhere from documentation to interacting with a lot of our ecosystem communities and showing you how some of this stuff works. I think it's going to be really fun. + +[01:00] Okay, I'll share the screen here. Maybe you can zoom in a little bit. Yeah, I think I can do that. My screen is a little big. There we go — I think that should be good. Does that look better? Yep. Sweet. Awesome. Well, like I said, today we're going to be showing off the OpenZeppelin UI Builder. Essentially, this is a tool you can use to interact with contracts that you have on either Stellar mainnet or Stellar testnet. But it actually goes a little bit further than that: it's honestly like a React component library, a UI library. And so once you've created these forms or got the basics going, you can download it as an entire app, or you can install the library as a package and build these UI elements yourself. It just takes a lot of that weight off of you when you're trying to build interactive elements for your contracts. So today I'll show you what it + +[02:00] looks like. You can visit this today — it's live at builder.openzeppelin.com — and you'll land on this "new contract UI" screen. All you have to do is select the Stellar tab, and then you select Stellar testnet or mainnet. I'm going to click testnet right now. From here we just paste in a contract ID. It works with both WASM and Stellar asset contracts; the main difference is that one will pull directly from source, while the other — since the interface isn't shipped with the source — will actually pull that from GitHub directly. But both of them work, so you'll have no problem there. We'll go ahead and pull this one in, and right away you'll see that we pull in a whole bunch of stuff. First, we pull in the contract state here. So this is the current state of the contract. This is what we call our kitchen sink contract — it has all sorts of variables and functions just to test a lot of different possible interactions. And we're pulling that directly from `stellar.expert` — + +[03:00] stellar.expert, really cool. And so from here, we can click next, and this is where we start to build out our form. The first thing you'll want to do is select a function: what do you want to build a form for? Which piece do you want to interact with? There's a whole bunch of different stuff; I'm going to select something pretty simple, just `set_text` right here.
So I just hit select here, and from here I can start to customize this. I can call it "set greeting"; I can change the description, things like that. From there I can go into the fields and set field labels, and I can change field types — if we need an email or another kind of input, there's a whole bunch of different types here. So I'm going to call this "greeting" and set the placeholder to "hello". And there's a whole lot more you can do, like required fields; you can hide fields from the UI or use hard-coded values. It's very customizable. And so once we have that, + +[04:00] I'll go ahead and pick an execution method. From here we can decide if we want to allow any wallet to connect with this and start using it, or if we want to require a specific account to interact with it. We also have the option to use OpenZeppelin Relayers. If you haven't checked out Relayers, I would highly recommend you do — you can check them out at docs.openzeppelin.com/relayer. Essentially they're managed services, kind of like a back-end server, that can relay transactions for you and automate certain things, and you can do things like gasless transactions. It has full Stellar network support, so it's really cool stuff; I'd highly recommend checking it out. But for now we'll just stick with a standard account. Then from there we have a UI kit, so you can choose which wallet kit you want to be included. Right now I have the standard one built by OpenZeppelin — very minimal, nothing really fancy. I can see my accounts connected here; I could disconnect. But if you wanted to, you could also use the Stellar Wallets Kit, + +[05:00] and so you'd have, you know, a different experience. You can customize that and choose which one you like; I'll just go ahead and switch back to OpenZeppelin. Once we have everything ready, I can either just hit next, or at any point during this building phase I can hit "preview form" to view it and test it out, and then go back to building. So once we have it set, I can actually give this a shot here. I can say "hello world" or something like that, go ahead and hit execute, and that's just going to have me sign that transaction. And there we go — it's successful. What's really cool is that we can just hit this refresh button here, and you can see we have our "hello world" that's been updated in our contract state, which is really cool. And from here, we can export this if we want to. This is going to export it as a zip folder, and once you unzip it, it's going to be a Vite React project. You can just install the dependencies, + +
So, here's a different one, that's a much more complex form where I can put in an account address. I can put an account label, key values. There's a whole lot of stuff you can put here. And so, these are all kind of stored. And. If I + +[07:00] wanted to, I could click on the three dots here to rename, duplicate, export. And all of this is basically just basic JSON data. So at any point I can actually export all of my current UIs into a JSON file right here. And. So from there I can import it and. If I ever needed to bring it back or something like, that, it's all dated. Just click import. And it brings in those forms, that I just had. And. So we can just go ahead and delete those. And yeah, that is the basics of the UI builder. And again, I think it works really great. If you're working with our wizard and you're like maybe putting together a contract, maybe you've deployed it. From here, you can just paste in, that address, interact with it, and test it out. Or. If you wanted to build your own web app, that has those UIs built in. Yeah, that's pretty much it. Great. If there's anyone, that has + +[08:00] any questions, please feel free to drop them in the comments. I think this was a really interesting demo. Are there any limitations or anything I should know as a developer? If what is the what would be the common use case? Is it some someone who's playing around with the UI and they can drop it into their own UI as a component or what is a typical use case you would say? Yeah, I think there's actually a lot of great use cases for it. One of them, like you said, is. If you're just playing around and you want to test around or try out a contract, it gives it a really easy un interface to start using it. But we've seen some other people as well where like it could be used in a team application like for instance to help manage liquidity pools or build more complex forms, that might be used internally. There's all sorts of things I think you can do. But I think you know for people who are just starting out just wanting to build and get experience I think it's great for, that + +[09:00] use case personally. Yeah, I can definitely see it. Developers in like early in the developer journey. If you build a smart contract, it sometimes it can be a little bit complicated. If you're completely new to Stellar to use the CLI to invoke the contract and get all the formatting correct in the invoke command. Definitely makes it a lot easier to be able to quickly spin up a UI and interact with your smart contract and test things out. But. But you said it's built in it was built in React. Yeah. Yep. It's all basic React UI components. I think we use Shad CN UI components under the hood as well. So I think we have a lot of visions too about what, that might look like in the future and how you might be able to import them into your existing app or something like, that. They are packages, that you can install to an existing React app and just start building right away. If you'd like to, which is really cool. Yeah, it definitely makes it easy to + +[10:00] drop it into to. If you're building a React application. Then drop in the forms to interact with the smart contracts. I think personally, I think we'll see a lot of use of this in hackathons where you have limited time to accomplish something. It's a very it's it looks very easy. I had a chance to play with it yesterday and it was just in a matter of minutes I had a front end for my smart contract. So really great experience. I see we have a question here. 
I see it as a good starting point for building deps for your customers to interact with your C contracts. Yeah, absolutely. Yeah. Absolutely. I even see it sometimes. I do a I in my role as a dev as you know from yourself, we get to play around with a lot of new stuff. Sure do. And and a lot of time just for the convenience of it. I think this could be a great way for me to + +[11:00] to test out some new features in smart contracts and. When we try to roll out. When we roll out something new. And I like to be testing things early on. But I don't necessarily want to build a whole dApp around it. So I think this has some great opportunities for both people playing around who's experimenting. But also building the dApps. Because they can drop in the their the components in their Next.js or their React application. Yeah, absolutely. Then there's a question does it support the new release of React, which includes a compiler demonstrated a few days ago at the React Summit. Let me see. I think as of right. Now we might be using React 19. So probably close. I'd have to double check. If we can actually get to using React compiler. But yeah, I'd be interested to try and see. If that does work. I know + +[12:00] that's still pretty new. Yeah. Great. Any other questions? In the meanwhile, I will say you mentioned relay. I think we are going to do a presentation about, that in about two weeks. I think it's Oh, excellent. I don't think it's on next Thursday. But I think it's in two weeks. If I remember correctly. But we are definitely going to do a developer meeting around the relay and have a demonstration of, that. So, I'm looking forward to, that as well. Yeah, don't miss, that. That's, that stuff is pretty sweet, very helpful. Okay, can you maybe just talk a little bit about what Relay is? Yeah, it's essentially like a it's like a Rust backend. So, it's written in Rust. It's very nice, fast, it's efficient, and it's very easy to set up in my opinion. You basically clone the repo. There's like a build command or a setup command you run and it creates like a single binary, which you can run you know in most environments and it'll basically create + +[13:00] like a API interface for you know smart contract interactions. So you can basically set up accounts or re they're called relayers and they can maybe have different addresses different permissions different you can load them up with different balances etc. And. So from there you can kind of make permissioned or spec particularly granted access for part particular tasks you know. And so there's a lot you can do with it. I think a lot of the people have used them for like gasless transactions. You can send an API request asking to do something. The relayer will take care of it. And then you know on your actual user interface app you never had to have a user pay for gas or things like, that. But yeah, it can really build a lot of, that. And I think there's also. When you use them with monitors, that's the other half of the equation there. monitors allow you to listen to smart contract events or functions or anything happening. You they're very flexible and the really + +[14:00] power comes in is like you can monitor one thing on your contract. And then as a reaction use something on your relayer. you know, so, that could be something like,. If somebody withdraws way too much out of a contract or is an alarming amount of money, you can maybe pause the contract or you can stop certain things, you can react to things. 
It's kind of built as both a security setup as well as, you know, just a ease of life setup to, you know, for building apps and stuff. Okay, great. It doesn't seem like we have any more questions. So, I would just thank you for the demo It was super interesting to see how easy it is to set up a front end and create front-end components for a smart contract. I It's kind of tooling where it's making life a lot easier. You get something up and running quicker. And one of the things, that I always love about using tooling like this is, that it's gone through a lot of testing. It's gone typically it's gone + +[15:00] through some auditing as well. So. So you get a really solid starting point for your project instead of trying to build everything yourself. So, thank you so much and yeah, we'll be back next week with another great demo. And just a reminder for everyone joining here at 1:30 p.m. Pacific, we have a protocol meeting on Discord. So, see you all there and thank you again, Steve. Thank you, Kristen. Yeah, it's great to be here. See you. Bye. + +
+ +## Transaction Fees, Resource Limits, and ZK-Friendly Hashing {#part-2} + + + +This segment continues ongoing work around transaction fees, resource limits, and developer-facing protocol ergonomics. The conversation builds on prior proposals aimed at making smart contract execution more predictable and sustainable, while preserving flexibility for more advanced applications. + +The discussion then turns to support for zero-knowledge–friendly hashing primitives, motivated by the growing interest in Merkle trees and proof-based applications on Soroban. Rather than introducing full hash functions at the protocol level, the proposed approach exposes low-level permutation primitives that allow developers and SDKs to construct ZK-compatible hash schemes while keeping the host interface simple, stateless, and adaptable to evolving cryptographic parameters. + +### Key Topics + +- Continued discussion and sample implementations related to `CAP-0066` and `CAP-0067` +- `CAP-75`: Poseidon / Poseidon2 support for Soroban via permutation primitives + - Motivation: ZK-friendly hashes dramatically reduce circuit constraints vs `sha256`-style hashes + - Need for on-chain hashing to match offchain proof systems when maintaining structures like Merkle trees + - Design choice: expose internal permutation functions (rather than full sponge/hash APIs) for: + - simpler, stateless host integration + - better maintainability and flexibility across different parameter sets + - enabling SDKs/contracts to implement common hash modes (e.g., arity-2 Merkle node hashing) on top + - Parameters discussed: field selection (BLS vs BN), state width `t`, rounds (`rf`, `rp`), exponent degree `d`, MDS/round constants + - Implementation/testing notes: + - Plan to ship SDK presets (e.g., common parameter sets used by proving libraries) + - Provide a “hazmat” interface for advanced users to supply custom parameters + +### Resources + +- [CAP-0066: Soroban In-memory Read Resource](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0066.md) +- [CAP-0067: Unified Asset Events](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0067.md) +- [CAP-0074: Host functions for BN254](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0074.md) +- [CAP-0075: Poseidon Hash Primitives](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0075.md) + +
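As a rough illustration of the Merkle-tree workload behind these targets — the transcript below notes that a tree with about a million leaves is roughly 20 levels deep, so one update or inclusion check costs about 20 node hashes — here is a Rust sketch of recomputing a root along an authentication path. The `Fp` type and `hash2` function are placeholders for a field element and an arity-2 Poseidon-style hash that a contract or SDK would build on the proposed permutation primitives.

```rust
// Sketch: recomputing a binary Merkle root along an authentication path.
// `Fp` and `hash2` are illustrative placeholders; `hash2` is NOT Poseidon.
#[derive(Clone, Copy, PartialEq)]
struct Fp(u64);

fn hash2(left: Fp, right: Fp) -> Fp {
    Fp(left.0.wrapping_mul(31).wrapping_add(right.0)) // dummy stand-in hash
}

/// Walk from a leaf to the root. `path` holds one sibling per level and
/// `index` encodes the leaf position bit by bit; a ~1M-leaf tree is ~20
/// levels deep, so this loop makes ~20 hash calls.
fn compute_root(leaf: Fp, index: u64, path: &[Fp]) -> Fp {
    let mut node = leaf;
    for (level, sibling) in path.iter().enumerate() {
        let node_is_right = (index >> level) & 1 == 1;
        node = if node_is_right {
            hash2(*sibling, node) // current node sits on the right
        } else {
            hash2(node, *sibling) // current node sits on the left
        };
    }
    node
}

/// Inclusion check: the recomputed root must match the stored one.
fn verify_inclusion(root: Fp, leaf: Fp, index: u64, path: &[Fp]) -> bool {
    compute_root(leaf, index, path) == root
}
```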
+ Video Transcript + +[00:00] Okay, I'll get started. Hi everyone. Today we are going to be talking about [CAP-75](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0075.md), which is a proposal to add two new host functions to support the Poseidon and Poseidon 2 hash functions. The CAP itself is fairly straightforward — two new host functions. It's pretty standalone; it doesn't build on the previous proposals much, except for using the BLS and BN fields. But I want to spend a little bit of time on the approach that we decided on and the rationale behind it. So first of all, a quick introduction on the motivation: why do we want the Poseidon and Poseidon 2 hashes? So + +[01:00] Poseidon and Poseidon 2 — I'll call them the Poseidon family — are a family of hashes: they accept a range of parameters, and you can configure them to compute the output. It's a sponge-based hash function, which means it consists of a sponge that absorbs input and emits output — a variable-length-input, variable-length-output function — and internally there's a permutation, which is fixed input, fixed output. The reason it is useful, along with other hashes of similar design, is that it operates natively on a prime field. And as you know, most zero- + +[02:00] knowledge applications are built on top of a prime field, such as BLS or BN254, which makes Poseidon work natively with those fields, so they are ideal for ZK applications. Just to give a rough idea, the number of constraints generated using a regular hash like SHA-256 versus Poseidon differs by two orders of magnitude. So there's a lot less prover-side complexity and time when using Poseidon. Now, proof verification typically doesn't involve hashing, but a contract implementing logic such as a Merkle tree that you want to generate proofs about needs to have the same hash implementation as what you use when generating the proof for that + +[03:00] Merkle tree. I think that's quite obvious. And having Poseidon implemented on the guest side, the contract side, is quite expensive: even with the functions that we already provide for BLS field arithmetic, there are still a lot of round trips between converting field elements to bytes and things like that. So that's why we want to support Poseidon as native host functions. So just a recap of where we were last time. Last time we proposed three possible approaches. The first is to just provide the hash functions, plain and simple — for the different field combinations of BN and BLS, the combinations of Poseidon and Poseidon 2, and the combinations of the number of hash + +[04:00] inputs. This gives us something like eight different host functions, which would cover a lot of use cases. But this approach isn't good, because we've learned from the feedback, and from the evidence, that a lot of these hashes require particular parameters, and these parameters are not set in stone — different implementations can choose them differently. So even if we specify them in the host, someone else might need a different set of parameters. So just providing a hash function isn't the most maintainable way. And what about providing more host functions for the field arithmetic? That also wouldn't solve the problem, because even
if we provide things like matrix multiplication, the Poseidon hashing involves many rounds of permutation, and each + +[05:00] round is doing this matrix multiplication, so we would still end up with a lot of these calls and all of these round trips. So one of the proposals we ended up with last time was: can we provide a more generic interface for the sponge hash — namely, absorb some input, do the permutations, and spit out the output — and let it be parameterized? That's what we settled on last time. So now, to transition to this CAP: this CAP proposes a slightly different approach, which I call approach four in the discussion thread, for anyone who's following. Approach four is: instead of providing the sponge interface, we provide the internal permutation functions that the sponge + +[06:00] hash uses. So why is that? The sponge is sort of its own animal — it's a different animal than the permutation. The sponge design has many considerations for different applications: not only hashing, but also things like generating a random stream of bytes, or a MAC (message authentication code), or a reseedable random value generator — these applications are all in the design scope of a sponge. So the sponge needs to be more flexible: it needs to maintain a state, and in some cases it requires switching between absorbing and squeezing and then + +[07:00] switching back. So the sponge interface isn't a single definition that we can just take, and it also has some complexity in it. However, internally, all the sponge functions call the same permutation function: in both absorb and squeeze, the input is passed in, the permutation is applied, and the output is squeezed out. So at the center of it is just this permutation function, which takes a fixed-length input and produces a fixed-length output. This is much more maintainable and much easier to support in Soroban without ramifications, because + +[08:00] it's just a simple cryptographic building block, and the Soroban host doesn't need to maintain any state. So there are no state-related issues, like: what if you make a sub-contract call — do you reinitialize the sponge, and how do you do that? By supporting the Poseidon permutation instead of the full Poseidon hash, we get a lot of flexibility in maintaining the Soroban host. And the sponge itself, for a simple hash algorithm like the binary hashing of a Merkle node, isn't that complicated: it's basically just a single round of taking the input, applying the permutation, and spitting out the output. I've written down a little pseudocode in the CAP that is basically just that. So just to + +[09:00] recap: the sponge interface is complicated — the sponge is designed for many different use cases, not just hashing. And even though for hashing the sponge is really simple, we don't want to bake a simplistic sponge implementation inside the host when it is much easier to do it in the contract or in the SDK. It makes much more sense to support, inside the Soroban host, the primitive where most of the expensive operations happen — the permutation function. So that's the high-level motivation, and now I can go through the two functions actually being proposed: one is a Poseidon permutation, and the second one is a Poseidon 2 permutation.
The two + +[10:00] look fairly similar. Just a bit of background: Poseidon is the original version, developed in 2019; Poseidon 2 is an improved version of that, developed in 2023. Internally they work slightly differently — Poseidon 2 improves the internal matrix shapes and adds a single initial linear layer applied to the input — but overall they work fairly similarly, so I'll just cover Poseidon 1. The Poseidon permutation takes the input, which is a vector object — your vector of field elements. The second argument is the field type. I think it probably makes sense to have the field type as a symbol, but in the CAP I'm putting a U32; it should be + +[11:00] equivalent. The field is basically an enum that specifies which field you want your permutation function to be defined on, and of course your input will be passed in the same field as the permutation matrix and all that. The rest of the arguments are the internal parameters for the hash function, so I'll spend a couple of minutes explaining what these fields are. t is — sorry, not the capacity, but the internal state size; it must match the input size and also the size of the internal matrix. It decomposes into the rate — which you can think of as the number of inputs + +[12:00] you need to hash at once — plus some capacity, which gives you the additional security margin. d is the degree of the S-box, which is one of the internal steps in the permutation before applying the matrix multiplication. For both the BN and BLS fields, d is equal to five, so we may only accept d equals 5 — but of course for other fields in the future it could be different. The rounds rf and rp are the numbers of full rounds and internal partial rounds — the number of rounds in which the matrix multiplication happens. There's also a linear layer — the linear layer is the MDS matrix multiplication — + +[13:00] and there are the round constants, which are also a matrix, but specified per round. So the MDS matrix for the hash-2 function is 3x3, and the round constants are another matrix with dimension n by m, where n is the number of rounds and m is three. That's a high-level run-through of the different parameters. The second function is fairly similar; the only difference is that the internal MDS matrix in the second version is a diagonal matrix, because of the Poseidon 2 improvements. In terms of cost types: I think we can manage to ship this as a native Soroban library, which means all of the operations already exist and are defined — these include the + +[14:00] field arithmetic for addition and multiplication and things like that. So there are no Poseidon-specific metering parameters we need to support. But we do have to add the BN254 field arithmetic cost types, which is very opportune timing, because we have [CAP-74](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0074.md), which adds the BN254 scalar field and pairing. So — I went through the parameters and the metering rationale. Yes, and the rationale for why we don't provide a hash function out of the box, and instead provide the permutation primitive, is explained in the CAP. Also, to mention, these parameters are + +[15:00] — some of them are kind of defined by the field. If you want to do hashing with BN254, then the degree is five; you can't change that. And some of them depend on the actual application — how many inputs you want to hash at once — and then, based on the size of the input, parameters like t and the number of rounds need to be adjusted. The original Poseidon paper provides scripts and guidelines for how to generate these parameters. In general, I think most applications use their own set of generated parameters, like Circom does and Noir does, and to provide the maximum + +[16:00] safety we would have these as presets in the SDK. So the SDK interface will look just like a hash-2 that takes two inputs, and internally you may be able to specify the set of parameters — the Circom parameters or the Noir parameters, things like that. And then we would also possibly expose the Poseidon permutation as a more hazmat-style interface — something that advanced users who know what they're doing can call with their own parameters — but we will make the distinction clear. In terms of testing, there's a reference implementation for Poseidon; Horizen Labs has a reference + +[17:00] implementation for Poseidon 2, and the same repo has Poseidon 1 — that's the one we're likely going to adopt. It also provides test vectors for both versions in different fields and parameters. Then there's also the original Poseidon paper, which comes with a Sage script implementation and some reference vectors, which we will also match our implementation against in our tests. There's a draft implementation, and I think that's it. Now the floor is open for questions. + +[18:00] Okay. + +
If you have a curve of. If you want to do hashing with BN25 254. Then the degree is five you can change, that. And then the sum of them is depend on the actual application, which is how many inputs you want to hash at once. And then based on the size of the input these parameters like t. And then the number of runs they need to be adjusted the original procidum paper provides scripts and guidelines for how to generate these parameters in general I think most applications have their own they have they use a set of generated parameters like CIRCOM does and Noel does and to provide the maximum + +[16:00] like safetiness we would have these as part of preset in the SDK. So like the SDK interface will look exactly just like a hash two, that takes two input. And then internally you may be able to specify the set of parameter like a circum parameter or neural parameter things like, that. And then we also possibly expose these Poseidon permutation as a more like a hazmat interface something, that advanced users, that know what they're doing can call with their own parameters. But we will make them the distinction more clear. Yeah. In terms of testing there's a reference implementation for Prooseidon. And so Horizon lab has a reference + +[17:00] imitation for Poseidon 2 and in the same repo has Poseidon one. That's the one we likely going to adopt. And it also provides test vectors for both one and two in different fields and parameter. And. Then there's also the original Prooseidon paper, that come with a sage script implementation with some reference vectors, which we will also match our implementation with in our tests. Tests. There's a draft implementation and I think, that's it. Now four is open for questions. + +[18:00] Okay. + +[19:00] Yeah. So I did a very rough well I wouldn't say too rough. But like the implementation is fairly mostly good. And then the field for BRS is already there. So I did a number on the reference test cases for BLS the 3x3 matrix case, which is the hash two operation, that's about half a million CPU instruction. So it's a lot better than what I projected last time. And. Then the 5x5 matrix case I believe, that's like cache three input, that's around 1 million CPU. So yeah, that's well below our target ceiling, which we want to support around 20 hash cores in a single contract. + +[20:00] questions. Okay. Back to you, C. Oh, okay. You want me to close it? Okay. Bye, everyone. + +
diff --git a/meetings/2025-10-16.mdx b/meetings/2025-10-16.mdx new file mode 100644 index 0000000000..e9ef7407e3 --- /dev/null +++ b/meetings/2025-10-16.mdx @@ -0,0 +1,139 @@ +--- +title: "State Archival Bug Review" +description: "Discussion of a state-archival bug introduced with Protocol 23 (Whisk), its impact on archived ledger entries, and the proposed protocol-level remediation focused on correcting archived state and safely unblocking affected Soroban contracts." +authors: + - carsten-jacobsen + - dmytro-kozhevin + - justin-rice + - nicolas-barry + - tomer-weller +tags: [developer, CAP-62] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This meeting is devoted to a recently discovered state-archival issue introduced with Protocol 23 (Whisk). Core developers explain what went wrong in the archival and restoration logic, how the issue was detected and contained, and why the network remained consistent despite the bug. + +The discussion focuses on the scope of impact, the reasoning behind the proposed remediation strategy, and what builders and infrastructure operators should expect during the upcoming protocol upgrade. Community questions cover challenges of pruning old ledger state, solutions/tradeoffs, and handling reconciliation in [CAP-76](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0076.md). + +### Key Topics + +- Overview of the state-archival bug + - Introduced with Whisk (Protocol 23) + - Affected a small number of archived ledger entries restored with non-latest values + - Deterministic behavior across validators (no fork) + - Total affected entries under 500; only a subset were restored and quarantined +- Containment actions + - Emergency measures to halt further archival of affected entries + - Manual extension of expiring entries to prevent additional corruption + - Patched Stellar Core released to quarantine corrupted ledger entries + - Transactions touching quarantined entries temporarily blocked +- Remediation approach + - Focus on fixing archived (not-yet-restored) corrupted entries at the protocol level + - Archived entries will be corrected so future restores return the proper value + - Live entries that were already restored incorrectly are handled by affected protocols directly + - Lumen supply discrepancy addressed by accounting adjustments (moving burn to fee pool) +- Transparency and verification + - CSV published listing all impacted archived entries + - Includes expected values, corrupted values, and ledger numbers + - Core will verify and update only entries matching known corrupted states +- Operational guidance + - Protocol 24 is a stability-only upgrade (no new features) + - Validators, RPC, Horizon, and related infrastructure must upgrade + - No SDK or XDR changes required for application developers + - Testnet upgrade precedes mainnet vote +- Ecosystem coordination + - Direct outreach to affected issuers and protocols + - Protocol-specific mitigation strategies (e.g., supply adjustments, internal accounting fixes) + - Emphasis on fast response, validator consensus, and Final Comment Period feedback + +### Resources + +- [CAP-0062: Soroban Live State Prioritization](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md) +- [Blog Post Announcement](https://stellar.org/blog/developers/addressing-state-archival-inconsistencies-protocol-upgrade-vote-next-week) +- [Community Discussion](https://x.com/i/spaces/1rmxPvRdmyjGN) + +
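To illustrate the "verify, then correct" rule in the Key Topics above — an archived entry is rewritten only if its current value exactly matches the published corrupted value — here is a small Rust sketch of that matching logic. The types and names are invented for illustration; the real logic lives inside Stellar Core and operates on XDR ledger entries.

```rust
use std::collections::HashMap;

// Sketch of the remediation rule described above. All names and types are
// illustrative; they are not Stellar Core internals.
#[derive(Clone, PartialEq)]
struct EntryValue(Vec<u8>); // stand-in for a serialized ledger entry

#[allow(dead_code)]
struct Correction {
    corrupted: EntryValue, // value the archive is expected to hold today (bad)
    expected: EntryValue,  // latest value that should have been archived
    ledger: u32,           // ledger number recorded in the published CSV
}

/// Returns the corrected value only when the archived value matches the known
/// corrupted one; anything unexpected is left untouched for manual review.
fn remediate(
    key: &str,
    archived: &EntryValue,
    corrections: &HashMap<String, Correction>,
) -> Option<EntryValue> {
    let c = corrections.get(key)?;
    if archived == &c.corrupted {
        Some(c.expected.clone())
    } else {
        None // does not match the known corrupted state: do not modify
    }
}
```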
+ Video Transcript + +[00:00] We're going to talk about a protocol upgrade that we're pushing for next week, which is around fixing a bug that was discovered last week. So last month we pushed out Whisk, which is Protocol 23; the protocol was upgraded by a vote from tier one validators. And on Thursday last week, a week ago, we discovered a bug in the protocol. We worked the following two days on containing the bug, which affected state archival. Basically, a small number of ledger entries were archived with the wrong version, so not the most recent version, and when they were restored, the + +[01:00] restoration allowed the non-most-recent version to be restored. The total number that was actually affected is less than 500 ledger entries, 478 to be exact. Out of these, only 84 were restored. So those are ledger entries that were actually restored to a non-most-recent value. Last week, Thursday and Friday, we worked with tier one validators to contain this issue, which means that we sent out an emergency SLP to avoid archiving more ledger entries. We also manually bumped all entries on the network that were about to expire, so that they don't. We then released a new version of Stellar Core that was patched to ensure that the affected + +[02:00] ledger entries cannot be touched, so that any state inconsistencies remain quarantined. That happened last week on Friday, and from that moment on we've been working, for the past week since Friday night, on communicating with validators and with affected protocols to understand the extent of their issues. We've also suggested a few protocol upgrade options to rectify this. And the option that is overwhelmingly the most popular is the least intrusive one, which is to obviously fix the bug and fix the archive. So all the ledger entries + +[03:00] that were archived and not restored will be fixed in the archive, and so when they're restored, they will be restored to the correct value. But with regards to the ledger entries that are live and have been restored incorrectly, we will not be addressing them at the protocol level. We have ensured with all affected issuers and protocols that we've been communicating with (we're talking about around 20 protocols) that they do have ways to rectify this on their end. For some issuers, it just means that they need to adjust the supply to make sure it reflects what's on chain. A lot of the state changes have actually been benign and haven't had any meaningful impact, and some protocols have had ways to kind of upgrade out of these issues. And so I am going to stop talking + +[04:00] for a bit right now and am happy to take any questions. Justin, did you guys hear all this? Yes, it sounded great. And right now I'm just going to paste the blog post that we just released, which explains this a little, in the chat over here. So if you want to read a version of what Tomer said, it's there. You'll also see that there's a link to a CAP there. The CAP is the suggested change that Tomer was talking about, and I think Nico is here and will join us on stage in a minute to talk through the CAP. But first there is a question from Jome: was this completely random, or were some entries correctly archived and restored? It's not random in the sense that this was not arbitrary. It was deterministic in the sense that all validators actually behaved exactly the same.
So the network did not fork based on this + +[05:00] behavior; it was consistent. But it required a very specific chain of events relating to how Whisk handles live state prioritization between the bucket list, which includes all the live state, and the evicted state. So not random, but definitely also not trivial to identify. Does that answer your question? Okay. I see someone saying, "I am late. How bad is this?" As Tomer pointed out, and you can read the blog post that's posted above, this bug was discovered by Elliot Friend, who's here on this + +[06:00] call and who is a Senior Developer Advocate at the Stellar Development Foundation. He discovered it while he was doing some work to get ready for a hackathon that's coming up, and as you can see from the timeline there, it did not take long at all for the issue to be quarantined, and the fix will be rolled out pretty soon. So while it's a serious bug, the impact is something that we've evaluated and talked to everyone who's been impacted about, and it's definitely contained at this point. I'll let you read the post and decide how bad it is, but my answer is that I think the response has kept it pretty contained and cut it off very quickly. At this point, I'm going to post here, this is the CAP, + +[07:00] yeah, and as Dima points out above, you can read what he wrote in response to your question too. I know there's a lot of discussion going on, but at this point I also think it's important to take this opportunity to look at the proposed solution, which is what I just pasted there. It's Core Advancement Proposal 76, and we have some people from the core team to walk through it. Nico, do you want to do it, or is it better if Dima does it? I can take a first stab at it. Can you hear me? Okay, perfect. All right. So at a high level, what the CAP is describing is first what is actually in scope to mitigate this problem. Like Tomer was explaining earlier, there are actually two + +[08:00] types of entries that were corrupted. They both started in the same way: entries got moved from the live ledger state into the archive, and during that process entries got corrupted. After that, some of those got restored, and at that point they were restored for all sorts of purposes. It could be just for reading them; a lot of entries in Soroban are only accessed for reads, and a Wasm entry is an example of that. Others were also accessed for modification. But what the CAP is putting in scope is only the first category of entries, so + +[09:00] basically the ones that were moved into the archive and stayed there. The solution, as part of this, is to basically go and take the value that was supposed to be in the archive and then override the archive with that expected value. So the fix itself sounds fairly simple, because it is. The bit of a challenging part here is how all of you can trust us that this is the right list, and there's actually quite a bit of work that went into this in the CAP. There's a full list, in a CSV file that you will find, where for each of those entries that ended up in the archive, we have the ledger number + +[10:00] where
the entry got moved into the archive and got corrupted, like I said. In that case, we actually have in this CSV the value of the entry as it was at the moment it was about to get moved, and we also have in the same row the value of the corrupted entry. And what happens when we follow this CAP is that core is going to scan those entries one by one and find the ones that match the corrupted state in the archive, and if it's there (it should be, because we also froze restoration at this point), then the value will get + +[11:00] replaced by what it should have been. So that's the first part of this fix. There's another one, related to the second set of entries, the ones that actually got restored, and that's the total lumen supply. There was about, I think, three lumens of accounting discrepancy. So in the proposal, what we are saying is that some lumens got burned during this event, and for accounting purposes we're going to move that burn into the fee pool. That's what the mitigation is for that. So, like I said, this is + +[12:00] the CSV file. In terms of the actual steps core is going to take to restore the entries in the archive to the right value, and in terms of verification, there are a few things that can be done. First, there's the list itself. This one was computed by instrumenting core and replaying all those events around eviction. The version of core that will go out with the protocol change will have this instrumentation, and better than that, it will also have a way to basically use that CSV file and check that those changes are the only changes that happened in that time period. So it's kind of a + +[13:00] double use, in a way, where you can use the CSV file first to see the full list of impacted entries and which ones will be restored, and you can also use it to understand what will happen with Protocol 24 and which of those entries will not be impacted by this protocol upgrade, in which case they will stay corrupted as they are today. Those are the entries that got restored, so those are live entries. So maybe I should stop here for a bit and see if there are specific questions. I just want to add one thing, because I got some questions on this earlier today: the patched version of core that is currently in production by tier one + +[14:00] validators bans every transaction that tries to touch any of the state that's been corrupted; that's those 87 ledger entries. So if for some reason anyone on this call, or anyone you're talking to, is unable to get a transaction in and is getting a timeout, this might be because you're trying to touch that state. Part of Protocol 24 is removing that ban, so the ban is supposed to be removed as soon as the protocol is in effect, which is right now scheduled for Wednesday next week, 5:00 p.m. EDT. And again, that's a small number of ledger entries, right? Yeah. Just for context, + +[15:00] you know, Stellar is a database. You can think of these ledger entries as rows in the database, and Stellar has around 47 million rows in this database.
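+The match-then-overwrite rule described above reduces to a few lines. A sketch assuming hypothetical CSV column names (`key`, `expected`, `corrupted` as base64 XDR); the published file's schema may differ:
+
+```typescript
+interface CsvRow {
+  key: string;       // ledger key, base64 XDR
+  expected: string;  // value the entry should have held when archived
+  corrupted: string; // value the bug actually wrote
+  ledger: number;    // ledger at which the entry was evicted
+}
+
+// Given the archive as key -> current value, plan the Protocol 24 fix:
+// only entries still holding the known corrupted value are rewritten;
+// everything else (e.g. already-restored entries) is left untouched.
+function planArchiveFix(
+  rows: CsvRow[],
+  archive: Map<string, string>,
+): Map<string, string> {
+  const updates = new Map<string, string>();
+  for (const row of rows) {
+    if (archive.get(row.key) === row.corrupted) {
+      updates.set(row.key, row.expected);
+    }
+  }
+  return updates;
+}
+```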
Out of those roughly 47 million rows, 87 have these issues. There's a question: will Protocol 24 contain new ZK features? No. The features that were planned for Protocol 24 will now be pushed to the next protocol; Protocol 24 is just a stability upgrade. Mootz has a question: to clarify, this CAP focuses on managing the existing corrupted entries. Is there a CAP or update that could be shared that + +[16:00] addresses the root cause of generating corrupted entries? Yes, definitely; Protocol 24 will fix the bug. Yeah, actually, I thought we included the fix itself in the CAP, so if we didn't, I'm sorry about that, but it should be there. Yeah, we could add it to the CAP for posterity. We typically don't include bug fixes in the CAP, because it's like a two-line fix and you do not do anything new from the protocol standpoint. You already have a CAP defined for this feature. The issue is not the protocol; the issue is that the implementation was incorrect. The CAP itself is [CAP-62](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0062.md), I believe. So we added to the CAP + +[17:00] that we are also fixing the bug, but that's not historically done for bug fixes. Sorry about that. Does that make sense to you? Of course; I asked as several people are typing. Just to confirm, will this require a network pause? No. I see there are some questions about the features that were planned for Protocol 24 that will now be in Protocol 25. If you scroll up and you grab that CAP, you can actually look at the different proposals for things that relate to ZK features, and I think you can dig into those. But for now, I think we should + +[18:00] keep this focused on the current situation, the current upgrade. Because I think it's also important. I mean, Nico, you talked through a lot of this, but can you just tell us what happens next? In terms of implementation and all of this, the team has already been very busy implementing this protocol change, given the urgency of getting this voted in as soon as possible, so that, like Tomer was saying, part of this also allows the network to unblock those entries. So there are many reasons why we want this to + +[19:00] happen as quickly as possible. In terms of next steps, what's going to happen is that we are going to have builds of Stellar Core, and of the rest of the platform-type software that depends directly on Stellar Core, that's Horizon, RPC, Galexie, released by Monday. Then what we expect is that Tuesday will be the testnet upgrade to this new protocol, and then we'll get the vote with the validators scheduled for Wednesday. Hopefully, given that we already did a lot of work with those validators, the vote will be approved, meaning that we'll be back in business in general + +[20:00] at that time. And so, just in general, to prepare: if people run infrastructure that has Stellar Core embedded in it, so either Stellar Core for a validator or those things on the platform side that Nico mentioned, RPC, Horizon, Galexie, you'll need to upgrade right before the testnet upgrade if you want your infrastructure to work on testnet, and before the mainnet vote. That means you have to pick up the fixes, the new releases, on Monday.
But unlike other protocol upgrades, this doesn't require everyone to install new SDKs. Correct. That's correct. Yeah, there's no impact to the XDR in particular, and because of that, applications in general don't actually need to do anything. It's basically about picking up the right version of core at + +[21:00] this point. The reason we have to upgrade systems like RPC and Horizon is because there's actually a check in those systems on the protocol version that the network is running. So in this case, after the vote, it will switch to 24. The systems are built with a safety in place so that they will only ingest data from a protocol that they understand. And basically this constant is the only change really in those downstream systems. I'd like to answer Adam's question. Adam asked: if a reserve asset is added to the pool, and the tokens are created inside the contract, won't it require a deposit of the actual assets to reconcile? So we did have a few of these ledger entries end up being + +[22:00] ledger entries that relate to liquidity pools. And the remediation is a bit different between different pools and different assets. For some assets where this changed a balance, if they have a centralized issuer, it was fairly simple to basically add the backing asset off-chain so that they can reconcile the issue. In some other situations, it was almost trivial to burn if there was an erroneous credit. Some liquidity pools are implemented in different ways. For example, the Aquarius pool is implemented in a way in which actually adding an asset doesn't change the price or anything, because + +[23:00] Aquarius has internal tracking of the reserves, so manually adding a reserve doesn't actually impact the price, and that added balance is unusable. It's not like that for Soroswap. And so different assets and different protocols found different ways to fix it. But again, we've talked to pretty much every affected protocol and issuer and ensured that all of them have ways to fix the situation in the context of these 77 ledger entries. Sorry, I said the wrong number before. It's 77, not 87. And I can't stress enough how helpful every protocol and asset + +[24:00] issuer have been. I think we started reaching out Friday night and Saturday morning, and everyone has been extremely understanding and collaborative, and it's awesome to see the ecosystem come together like this. I see there are still some people typing in the chat. I think a lot of it's just comments. I'd say if you have a question, get it in now. It's telling me OrbitLens has a question. Orbit asked, "Do we need to update the RPCs as well?" and George from the platform team said yes. + +[25:00] Yes. Is there a technical reason this must be a protocol version upgrade instead of a patch release? Well, yes. It's an update to the ledger, and because of that, it has to happen on all machines at once. If it were a patch release, the change would happen only on whatever machine is running that patch version. + +[26:00] Is this recorded for playback, Carsten? Yes, correct. Yeah, I would say in general,
if you have further questions, it's a good idea to read that blog; it just came out, I think an hour ago, maybe even less. And also to read the CAP. In general, if you're interested in these kinds of issues, make sure that you pay attention here or sign up for the Stellar Dev mailing list, and make sure that you pay attention to the CAPs. We do try to be as transparent about any of these changes as possible. And obviously, through all of this, not only could we not make a decision without all the feedback from the ecosystem about what to implement, but implementation is just one step in governance, and the actual adoption of a protocol change on the network requires validators to vote it + +[27:00] in. So in all of this, we are always trying to talk to people, including on this call, but also online and on various other platforms, and we're trying to put the work out there in a place where you can examine it and understand it, and we certainly encourage you to do that. In fact, with the CAP, there's a whole process for Core Advancement Proposals, and as part of it, we put them into a Final Comment Period. Because of the urgency here, we fast-forwarded this one to Final Comment Period. It's going to stay in Final Comment Period, which means that you can write about it on the Stellar Dev mailing list, until the validator vote happens, which should be on Wednesday. So, this CAP is officially in Final Comment Period, and we definitely urge you to take a look and, if you have anything to say, to add it. + +[28:00] Orbit is pointing out the short timeline. Yeah, it is a short timeline, I agree. The reason we still think this is best for the ecosystem is that we are actually giving, at this point, almost a week's notice that this is coming, so people should be able to prepare ahead and allocate those two days for upgrading their infrastructure. I think, given the severity of having those entries blocked, this is probably the right trade-off. + +[29:00] Nico, what will the experience be like for somebody that needs to upgrade their RPC? Will it be difficult? No; to upgrade, this is basically just like doing a regular software package update. There are really no other steps to take. And for people that don't, I think you mentioned that earlier, that core instance will get stuck until the package gets updated. It's not going to do some strange things. It's just going to + +[30:00] Anyone else have a question or anything to add? I saw a question that I don't think we can really answer directly, on the potential impact for specific protocols. I think those need to be answered by those protocols themselves. The assessment that we got from basically everyone at this point is that people are going to be able to mitigate, for the most part, + +[31:00] the impact of this corruption. I think, Elliot, the answer to your question is what Nico just gave. + +[32:00] Well, I'll give it a few more minutes for some of these comments or questions to come in. Anyone know where the bug came from slash how it got in? I mean, bugs are bugs. Yeah, they're always sad. What I can say is that we had multiple rounds of code review. We had + +[34:00] Well, I see a little bit more typing.
It seems to be witticisms more than questions. "Thank you for being so responsive to fix the issue." That's really nice. Yes. Again, I also want to echo what Tomer said before. This has been a really cool moment where a lot of engineers at SDF, but also a lot of people in the ecosystem, have really stood up and provided information, given opinions, given feedback, reviewed data, done their own analysis. And so this, for what it's worth, really does show that there is an ability to be super responsive in general across the ecosystem. + +[35:00] Yeah. If you're not following over there, in the CAP there's actually a CSV file that lists all the entries that got restored. So you can actually look at all the restored entries, or you can do some stuff with the RPC and look at that. We're learning here and + +[36:00] honing our chops. Yeah, that's correct. Right now, all you will get is a timeout if you try to interact with one of those entries. I see somebody said, and maybe this was a joke, I can't tell, or not, I guess there's no way to tell: "So, this isn't going to cause Stellar to fork." No... well, that's a tricky construction, because there's a negative in there. This will not cause Stellar to fork. That's my statement. + +[37:00] I'm trying to even think through how it possibly would. I can't even picture how it might. Yeah, again, the place where we are right now: the issue was identified, the functionality was turned off, impacted ledger entries were temporarily quarantined, and the fix will basically allow them to be un-quarantined, and allow any of the ledger entries that are currently archived, when they get restored, to be restored correctly. That's where we're at, I believe. But again, all of that is an implementation based on feedback from the ecosystem; it actually requires validator assent through a programmatic vote on + +[38:00] the network to go live and take effect. It looks like Adam was experiencing a timeout message, to save you from yourself. Okay, cool. I feel like we're winding down. Again, some takeaways: read the blog; read the CAP (it's in Final Comment Period); join the Stellar Dev mailing list if you want to leave a comment; look at that thread that persists about the issue if you have questions or an experience that you want to share. We will try to keep you updated as new things happen. Again, it seems like there is social consensus on a path forward for a fix, which is what is captured in that CAP. As + +[39:00] the Stellar Core team implements a solution and it becomes available, we will let you know, and shortly thereafter we will try to upgrade the network. To Orbit's point, it's a very short time frame for that to happen, so we're starting to talk to people now to let them know. But once all the software is released, the actual upgrade process, from an operational point of view, should be pretty straightforward. So yeah, I really appreciate you all for joining this, and maybe under different circumstances we can just hang and chat again sometime. This was nice. Thank you all. + +
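+A side note on the downstream-systems check discussed above (RPC and Horizon only ingest protocols they understand): operators can probe readiness themselves before re-enabling jobs that touch previously quarantined entries. A sketch using the JS Stellar SDK's RPC client; the endpoint URL is a placeholder:
+
+```typescript
+import { rpc } from "@stellar/stellar-sdk";
+
+// Poll the RPC node until the network reports the expected protocol version.
+async function waitForProtocol(serverUrl: string, minVersion: number) {
+  const server = new rpc.Server(serverUrl);
+  for (;;) {
+    const network = await server.getNetwork();
+    if (Number(network.protocolVersion) >= minVersion) return;
+    await new Promise((resolve) => setTimeout(resolve, 60_000)); // retry each minute
+  }
+}
+
+// await waitForProtocol("https://soroban-testnet.stellar.org", 24);
+```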
diff --git a/meetings/2025-10-23.mdx b/meetings/2025-10-23.mdx new file mode 100644 index 0000000000..9ccb21ce73 --- /dev/null +++ b/meetings/2025-10-23.mdx @@ -0,0 +1,121 @@ +--- +title: "Fernando's Dob Protocol" +description: "Fernando Castillo (Dob Protocol) explains how Dob turns real infrastructure projects into transparent, on-chain revenue-sharing opportunities using Soroban smart contracts, offchain verification, and USDC-based splitters to distribute returns to supporters." +authors: [carsten-jacobsen, fernando-castillo] +tags: [spotlight] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +Fernando Castillo, CTO and co-founder of Dob Protocol, shares how the project helps Latin American infrastructure operators access funding when traditional bank financing is slow or unavailable. The conversation focuses on how Dob collects verifiable project data, applies automated checks plus human review, and uses Soroban smart contracts to distribute revenue to supporters in a transparent and auditable way. + +The session also covers practical design decisions: why Dob focuses on revenue streams rather than tokenizing the physical assets directly, how compliance processes remain local and largely offchain, and how the protocol plans to attract liquidity across chains while keeping distributions simple (primarily USDC). + +### Key Topics + +- Problem Dob is targeting + - Infrastructure operators in LatAm often struggle to access traditional loans due to bureaucracy, country risk, and inflation-driven rates + - Standard crowdfunding lacks trustworthy, automated, and auditable revenue distribution back to supporters +- Dob’s approach and workflow + - Operators upload verifiable project/company data (e.g., device specs, invoices, revenue evidence) + - Evidence is checked via automated validation plus human-in-the-loop review + - Projects receive scoring/risk assessment that influences expected returns + - Only hashes/commitments of documents are recorded on-chain to avoid publishing sensitive data +- Token Studio and project funding model + - Two primary roles: infrastructure operator and supporter/investor + - Focus is on tokenizing/allocating rights to future revenue flows (not tokenizing the physical asset itself) + - Revenue distributions are designed to be transparent, scheduled, and proportional +- Soroban smart contract “splitter” design + - Contract holds inflows (typically USDC) and distributes based on configured shares + - Configurable cadence and allocation rules per project + - Withdrawal flow for token holders to claim their share + - Uses custom token logic for holder accounting (beyond classic asset behavior) +- Security considerations discussed + - Access control for admin-only actions and authentication requirements + - Limits/validations around allocations and transfers + - Operational tooling for deployments and transaction safety +- Compliance and safeguards + - Local KYC/KYB and AML processes are required for participating businesses + - Discussion of mitigating “rug pull” and expired/invalid proof risks + - Mentioned future work toward collateral mechanisms for additional supporter protection +- Broader uses beyond infrastructure revenue + - Vesting-style distributions, DAO treasury flows, and reward distribution as related patterns +- Liquidity and growth plans + - Targeting Stellar and Base today, with intent to route/bridge capital toward infrastructure yields + +### Resources + +- [Dob Protocol](https://www.dobprotocol.com) + +
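+The splitter behavior summarized above is plain pro-rata division over token holders. A minimal sketch of the rule in TypeScript (integer math, floor division), independent of Dob's actual contract code:
+
+```typescript
+// Each holder receives balance * shares / totalShares, in integer math.
+function distribute(
+  balance: bigint,
+  shares: Map<string, bigint>,
+): Map<string, bigint> {
+  const total = [...shares.values()].reduce((sum, s) => sum + s, 0n);
+  const payouts = new Map<string, bigint>();
+  for (const [holder, s] of shares) {
+    payouts.set(holder, (balance * s) / total);
+  }
+  return payouts;
+}
+
+// Example: 100 USDC (7 decimal places) split 10% / 90% between two holders.
+const payouts = distribute(
+  1_000_000_000n,
+  new Map([
+    ["G...HOLDER1", 10n], // placeholder addresses
+    ["G...HOLDER2", 90n],
+  ]),
+);
+```
+
+Floor division means rounding dust stays in the contract rather than being over-distributed, which is the usual safe choice for a splitter.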
+ Video Transcript + +[00:00] Hi, and welcome to this week's Stellar Developer Meeting. With me in the studio here today I have Fernando from Dob Protocol. I can't exactly remember when I heard about Dob Protocol for the first time, but I do remember noticing that my feeds on all social media, LinkedIn, X, anywhere, this company just started to pop up. There were articles, they were going to conferences, and I just got really curious about what's up with these guys and what they are doing. And of course they were tagging Stellar in everything they were doing, being awesome ambassadors. So I started to read up a little bit on what they're doing, and since then I've had the pleasure of meeting the whole team several times, last time actually this week in Silicon Valley where I met Oscar, but I've also met the team at Meridian and many other places. So I think it's a great + +[01:00] opportunity to have them here in the studio to hear what they're up to and what they're doing, and to share it with the rest of the community. So welcome, Fernando, and thank you for joining today. Yeah, thank you for the invitation, Carsten. It's a pleasure to be here. So should I just continue now with the presentation? Yeah, maybe just tell a little bit about who you are and how long you've been working on Dob Protocol. A bit of an introduction. I'm Fernando Castillo. I'm currently the CTO of Dob Protocol and one of the co-founders, and our main goal now with the protocol is to enable all of the infrastructure that's not being funded to get funded by decentralized means. And now in + +[02:00] this presentation, hopefully, I'm going to show a way we can do that with Token Studio and with the other tools we've been developing. So first, yeah, a bit of an introduction to the team. Carsten already mentioned Oscar; he's our CEO, and he's currently in Silicon Valley doing the Embark Pro program with Draper University in conjunction with Stellar. We also have Simone Spinola, who's our operations and business officer, and myself, as I said already. As a side note, I'm finishing my PhD at TU Berlin, so I'm in Germany currently. It's a bit late for me now, it's 10 + +[03:00] p.m., but I'm happy to be here. So the problem that we are facing is that in Latin America there are a lot of infrastructure operators that are lacking funds, and they struggle to scale, because with traditional banking systems they are required to go ask the bank and request a loan, then go through all the bureaucracy of the bank. Meanwhile time moves on, and they need the money to deploy and create this new infrastructure, which is not risky by itself, but maybe the banks consider risky because of the size of the company or the type of emerging market. But it's not that the business itself of that infrastructure is non- + +[04:00] investable or that it won't generate returns. So, as I was saying, let's imagine that you are Mariano and you are trying to get a traditional credit, a loan, from a bank. Depending on the country, for example, we've recently seen in Argentina how the country was facing high inflation rates, or a similar situation in Venezuela and other countries in Latin America, this ties those loans to higher interest rates, plus the risk of the country itself. So there's this problem of banks, because of these situations, not giving beneficial loans to these infrastructure operators. And at the same time,
if the + +[05:00] infrastructure operator would go, for example, for a regular crowdfunding solution, they could go online and say, okay, let's ask all of these people for this amount of money, say $100,000, so we can build this power grid based on solar power. Then how could this operator return or distribute the revenue that this energy production is generating? And how could they do it in a trustworthy fashion, so all of the people that were investing in or supporting the project can get the money, or the investment, back? And just as a + +[06:00] parenthesis, LatAm infra operators are Latin American infrastructure operators. They are, for example, medium to small companies that have infrastructure projects: a small, or not so small, project of a few kilowatts or 100 kilowatts of power injected into the energy grid, for example solar-based or eolic, or another type of infrastructure, for example a data center, or any type of infrastructure that's not run by a big company, because big companies wouldn't have this kind of problem accessing credit or loans from banks. So what are we proposing as a solution? We have this separated into + +[07:00] four steps. One is to provide a tool for the operator to hand over verifiable data from their existing company or projects. So they can, for example, validate: okay, this solar cell is going to produce this amount of energy because it has these specifications; I have already bought them for this price; or I have this revenue evidence for similar infrastructure, pictures, or proof of location for where the infrastructure is installed. And that's the first part: how can we do some due diligence? It's the same thing the bank would do when this person goes to the bank. Okay, + +[08:00] show me the papers. How can I know that you are not going to ask for money for a made-up project, but that it's a real project and you're a real company that was not just created yesterday? Then all of this information is audited and reviewed by automated processes that are human-led. What does that mean? It means that we check, for example: okay, this is a receipt, but does it have a digital signature? So it's not that this invoice, for example the one you get from another company paying for your service or your infrastructure, is made up; at least it's on the record, it's on the ledger of the tax office or the corresponding + +[09:00] regulatory office in charge of invoices. So they cannot be made up, and the same goes for the revenues or how other distribution processes have been made. And then the third part is: okay, how can we then get this decentralized funding into the hands of Mariano, the one that was asking for the funding? This is where we have the smart contracts that take care of this operation, where the supporters of the project can support Mariano's infrastructure project, and then he gets the funds to install or do the setup for the project. Then, once + +[10:00] Mariano is going to pay back his supporters with the corresponding interest, the smart contract also takes care of that with a decentralized distribution that's transparent and auditable for all of the stakeholders. And this is currently in a suite that we're calling Dob Token Studio. There, basically, we have two types of users.
Now we're thinking of a third user, but the two main users, as you probably have noticed already, are the infrastructure operator and the investor or supporter. On one side, the infrastructure operator, as I was saying, takes care of uploading the information: the device information, how it is + +[11:00] technically behaving, the type of revenue it's going to generate, and the revenue distributions, every week, every month, every quarter, or annually. Those set the expected APR that the infrastructure operator is offering. Once this is done, it goes onto the blockchain, so it's committed: okay, this is the information we commit to for later auditing. And then, as I said, the authenticity and validity of the information is reviewed. Maybe you can just pause for a moment, there are a couple of questions. Yes. So, going to + +[12:00] the first question: what are LatAm infra operators? They are Latin American infrastructure operators; they are the ones that want to get funded. RWAs and infra are inherently illiquid, how do you address that? Yes, that's one of the innovations we are doing. Maybe that was one point that I was missing: we are not tokenizing the infrastructure itself. What we are focusing on is the revenue this infrastructure is going to generate, and that comes with a higher risk, obviously, but it balances the higher risk with a higher APR. How do you solve regulatory + +[13:00] compliance and legal issues? Yes, I really don't think I will be able to address all of those regulatory compliance issues, but I can say that because of the nature of the project, as I was saying earlier, we are not tokenizing an asset in itself but working with the future revenue it will produce, and that can resolve other types of compliance and regulation issues. It's also important to mention that this is very local and finally depends on each country, so it won't be the same, for example, in the US, here in Germany, or in Chile. Okay, then: is this a loan, or is it revenue participation in the project? + +[14:00] Yes. So this can be considered as multiple things. In the end, the revenue that the infrastructure generates is, as I was saying, what's being tokenized. But if you look at it from a different, or the traditional, perspective, this could be taken as a loan. From the chat, I see Oscar was answering. Is there... who reviews, a central entity? Yes, I think those are most of the questions. Then yes, obviously this is a regulated service; it is not the + +[15:00] wild west. But now I will focus mostly on the technological part of how we are addressing these issues, because for compliance I think it would take a bit longer to answer all of those questions. Okay, so I'll continue then. With this, we are offering multiple features in this platform, and we can submit multiple types of evidence. This is based on the concept of trust, where we are producing evidence for all of the stages and all of the interactions the infrastructure and the operators are generating. What does that mean? For example, as I mentioned already, the operator can submit: okay, these are the technical + +[16:00] specifications, or this is an invoice. But then we can get more complex evidence
when we start to aggregate different types of evidence. One invoice alone, for example, would mean: okay, you will receive this payment in the future, or you are receiving it now. But to do the accounting evidence, for example, we need to connect with the ERP of the business, and that's more complex than just verifying a single invoice. Then, human-led guided integrity checks mean that even though what we are doing is highly dependent on automation, we still require people in the loop, personally looking at + +[17:00] everything. As I was mentioning earlier regarding regulation, we'll rely on the on-chain verification module. So all of the transactions, especially for the stakeholders that are supporting the project or the infrastructure operator, they can see how all of these actions are verified on-chain and how they are transparent, with the respective proofs. And finally, as every project is different, all of them will have a different scoring, which will range, basically, from high-risk projects with a higher APR to lower-risk projects with, therefore, a lower APR. + +[18:00] In practical terms, the overview of the smart contracts, in particular for Token Studio, is that the infrastructure operator deploys the splitter contract, and there we have different functions to define the shareholders, how they are going to do withdrawals, how often the distribution events are going to be, and which token this is bound to. Regularly we will be working with USDC, but this could take other types of tokens if that's how the project is defined. But most + +[19:00] of the time we work with USDC, and in simple terms I think it's super easy to understand: if there's one shareholder or token holder with 10%, then once the distribution takes place, 10% of all the revenue that was generated and deposited into the smart contract will go to the one that has 10% of the tokens. How the distribution flow goes is that first we have the admin, which tells the splitter contract that's going to do the distribution what the token address is. Then this is + +[20:00] verified inside of the smart contract; it checks the current balance, for example, are there 100 tokens to distribute? And then it loops over the token holders and their respective balances; that's how it does the calculation, and this goes into the storage layer, where the distribution is finished. It's really straightforward. Then for the withdrawal, each shareholder or token holder goes to the splitter, it does the respective verification check with the storage for the current allocation, then decreases the + +[21:00] storage part, then communicates directly with the token and does a transfer for a successful withdrawal. And then it goes back to the storage level. I'm presenting it like this because we are currently going through the respective revisions and audits, so not everything can be public; I mean, this can, but not the code itself yet. Then we have the shareholder data key and the allocation data key, which basically define the share for each token holder, who the admin is, all of the security, + +[22:00] and what the allocation for each token holder is,
and how this can be reallocated each time a distribution runs. Then, on the security level, we are taking this very seriously: defining the access control policies, admin-only functions, authentication, locks on the contract, the transfer contracts. We also have our internal scripts for the deployer to do atomic operations, so we don't run into front-running. Then we have allocation limits, validations, and so on. As a first use case, we've presented this as investment returns + +[23:00] for the investors or supporters of the infrastructure project, but this could for example be used for token vesting models for team members over time, for something similar to revenue sharing for a DAO treasury, or for reward distribution. The features that were missing before are that this is a secure and transparent way of performing the distribution, because all of the transactions are done on chain, and these transactions are proportional for each one of the supporters. So yes, thank you very much. I hope I didn't run over time, and + +[24:00] we have a few questions in the chat, I think. Okay. Where I left off: is it a loan with interest, or is it a participation? Yes, as I said, it's more of a share participation in the project. I was making the loan-with-interest analogy at the beginning to compare with traditional banking settings. Who will review, centralized entities? Yes; all of the businesses, which are the infrastructure operators in this case, go through a know-your-business process, because this also has to comply with anti-money-laundering policy and all of those regulations. Does that mean that the entire review + +[25:00] process and due diligence is... No, the proofs, or more precisely the hashes of the proofs and the evidence, are stored on-chain, because we cannot just publish accounting statements or financial statements from companies on chain. Is it really 100% of the project's revenue, or is it some...? Yes. When we consider, for example, the energy production that a solar panel is going to generate, let's say it generates 100 dollars per week, there's a minimal amount, defined during the creation of the project, that needs to go to + +[26:00] the operation itself. That could be, for example, 30% or 20%; that depends on the project. So finally, what's discounted from the operation is what gets distributed. How would you then manage state expiration if you are storing hashes on chain? Do you mean if a proof is expired, for example? I'm not sure which state you mean, Matias. Yes. So, when proofs are verified,
Because from the beginning this sounds more like okay we will we could be giving random money to a strangers stranger, that maybe at, that point everything was true. But then still did a rock pole as you mentioned and. Therefore we are creating this other collateralization mechanisms but, that's not defined yet. But, that's why also on the first stage of this we are focusing only on companies, that we know, that can give this guarantees and are not a meme company for example. So + +[29:00] that's one really important point. So Stellar asset or so. So. So the tok the Stellar the artist this token actual Stellar asset or Soroban tokens. So as we as I was saying it's the token, that's distributed is a USDC and how the asset of the token holder is also a token. Because why and why not a regular seller asset. Because we needed to create some extra functions, that were not into the assets. Okay, great. I think you got caught up on all the questions. It's great to see, that there's. So many questions. So much interest in what + +[30:00] you guys are doing and how you structure it. So yeah, it was great. If there's no other questions. Then I guess we can stop here. But thank you so much for joining today. I think it's super interesting to see what you're doing. It's I kind of like these type of projects, that are not the typical projects we see. It's definitely using tokens and using the platform in a different way. I think, that's, that's super interesting. Yeah. Yeah. Just let me add there's a last one. Yeah. Yeah. To to answer, that last question. Yes. One of our also our current goals is to bring liquidity from other blockchains in particular from Ethereum based blockchains liquidity and, that's + +[31:00] because actually we're not only based on working on Stellar. But we're also working on base and we see this potential from bringing more liquidity to Stellar from base through this infrastructure projects and yes we are taking a look at near intense. Great. Well, thank you for joining everyone and thank you for joining too, Fernando. It was great to catch up with you and I'm sure I'll see you at events around the world like I have this the past year. Thank you for the invitation. It was a pleasure. Yeah. And and. If anyone else has any questions, I'm sure you can reach out to the team on Discord. I think one of the things, that's very interesting about this is, that brings up some use cases and some things, that are, that + +[32:00] that raises some questions. Because maybe we haven't seen, that before or maybe we're not used to thinking about tokenization in this way or using the platform. So, a lot of great questions, that really I think you had great answers and it's interesting to see how you're building this. So, thank you everyone and see you again next week. Thank you. Bye. + +
diff --git a/meetings/2025-10-30.mdx b/meetings/2025-10-30.mdx new file mode 100644 index 0000000000..c9b34964ea --- /dev/null +++ b/meetings/2025-10-30.mdx @@ -0,0 +1,91 @@ +--- +title: "OpenZeppelin Relayer" +description: "OpenZeppelin presents its Stellar Relayer and Managed Service, including a channels plugin that enables parallel transaction submission via channel accounts, automatic fee-bump (gasless) execution, and secure key management for scalable Soroban and Stellar transactions." +authors: [carsten-jacobsen] +tags: [spotlight] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session continues the OpenZeppelin tooling mini-series with a deep dive into OpenZeppelin Relayer for Stellar and the upcoming managed service offering. The presenters explain how the relayer simplifies transaction submission by handling operational complexity like retries, fee management, and Stellar sequence numbers, with support for Soroban contract deployment and invocation. + +A major focus is the channels plugin: a scalable approach to parallel transaction processing using a pool of channel accounts. The team demonstrates how requests acquire a channel account, sign the transaction, wrap it in a fee-bump transaction for gasless UX, submit it to the network, and release the account on confirmation—enabling many transactions in parallel from a single logical source. + +### Key Topics + +- What OpenZeppelin Relayer provides on Stellar + - High-throughput transaction submission and reliability features (retries, sequencing) + - Support for Soroban smart contract deployment and invocation + - Standard Stellar payments (e.g., XLM transfers) + - Secure key management options (KMS signing, Turnkey signing, local signers) +- Plugin architecture and the channels plugin + - Extends the base relayer via TypeScript plugins + - Manages a pool of channel accounts to avoid sequence-number bottlenecks + - Workflow: + - Acquire and lock a channel account + - Sign the user transaction with the channel account + - Wrap in a fee-bump transaction (gasless execution) + - Submit to the network and return results + - Release the channel account for reuse + - Throughput scales with the number of channel accounts (e.g., 200 accounts → 200 parallel txns) +- Pooling implementation details + - Uses Redis to coordinate channel account locking and availability + - Supports listing, adding, and removing channel accounts via SDK methods +- SDK and demo + - Simple client setup using an API key + - Example Soroban submission using a function XDR payload + - Demo showing multiple transactions sent in parallel from the same account +- Managed service overview + - Hosted infrastructure using AWS (Fargate tasks + shared Redis), fronted by Cloudflare and a load balancer + - Intended to offer a quick path to adoption without running your own infra +- Self-hosting + - Fully open source; can run locally with Docker-based example configuration + - Same plugin and channels capability available outside the managed service + +### Resources + +- [OpenZeppelin Relayer docs](https://docs.openzeppelin.com/relayer) +- [Repository](https://github.com/OpenZeppelin/openzeppelin-relayer) + +
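+Based on the demo described above, client usage might look roughly like the sketch below. The package and method names (`@openzeppelin/relayer-sdk`, `ChannelsClient`, `submitTransaction`) are assumptions for illustration, not the published SDK surface; see the docs linked under Resources for the real API:
+
+```typescript
+// Hypothetical client, modeled on the demo shown in the session.
+import { ChannelsClient } from "@openzeppelin/relayer-sdk"; // assumed package name
+
+async function main(functionXdr: string) {
+  const client = new ChannelsClient({
+    baseUrl: "http://localhost:8080", // self-hosted relayer; swap in the managed URL when live
+    apiKey: process.env.CHANNELS_API_KEY!,
+  });
+
+  // Ten Soroban invocations from one logical source, submitted in parallel;
+  // each one transparently grabs its own channel account behind the scenes.
+  const results = await Promise.all(
+    Array.from({ length: 10 }, () =>
+      client.submitTransaction({ func: functionXdr }),
+    ),
+  );
+  console.log(results.map((r) => r.hash));
+}
+```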
+ Video Transcript + +[00:00] Hello, and welcome everyone to this week's Stellar Developer Meeting. This week we are talking to OpenZeppelin, and the primary focus of this week's meeting is the relayer service. So yeah, please introduce yourselves and we'll get into it. Yeah, hi, thank you. Thanks for having us. My name is Dylan Kilkenny, a software engineer at OpenZeppelin, and for the past couple of months I've mainly been working on the relayer infrastructure, the plug-in systems, and specifically the Stellar integrations. Hi, my name is Sai. I'm the engineering manager managing relayers and monitors at OpenZeppelin. Hi, my name is Collins, a site reliability engineer supporting the relayer plugins that you are about to see presented. Great, thank you. So we already talked a + +[01:00] little bit, at a very high level, in one of the previous talks we had with OpenZeppelin. So I'm excited about diving deeper into what it is, how it works, and what you can do with it on Stellar. So, please share your screen and let's get started. Okay, perfect. Thank you. One second. Okay, you can see my screen. Yeah. So what we've been working on and working towards the past few months is this managed Stellar relayer service, and in a nutshell, what this offers is a scalable, high-throughput transaction service for the Stellar network. There are a few components that were involved in the making of this, and the first of them is the OpenZeppelin relayer. So I'm just going to go through a few of + +[02:00] the things that got us here and what it took to build this. First of all, you've already spoken about the relayer service, but just to go over it quickly again: the OpenZeppelin relayer is an open source tool, and it simplifies transaction management for multiple different networks, including Stellar. What that means is it'll handle the gas, it will handle the sequence numbers, and the reliability, like the retries if a transaction fails. So this is useful for a number of reasons, such as gasless transactions, automating transactions, or scalable workloads. But one of the valuable aspects of this type of tool is that it's secure, and secure key management is one of the most important features. In the case of Stellar, we support KMS signing, Turnkey signing, or even local signers. So yeah, a few + +[03:00] months ago we started working on a Stellar integration for this tool, and in doing so we now offer full Soroban smart contract functionality. It allows users to deploy contracts through the OpenZeppelin relayer, they can invoke contracts, and then there are standard Stellar XLM payments. And as I mentioned, this comes with KMS signing and Turnkey signing. But there are some limitations to how you can send transactions through the Stellar network, one of which would be the sequence number, I'm not sure how to put this clearly, causing issues with scalability, like high-throughput transactions. So we were working towards developing some features within the relayer that will allow the Stellar relayers to scale larger, and one of the features that enabled us + +[04:00] to do this was the relayer plug-in system. In a nutshell, what this does is allow us to extend a standard relayer with further functionality.
From a developer's perspective, what you would do is write some TypeScript code, and inside this plug-in you would have access to the relayer system for submitting transactions, signing transactions, querying state, etc. You would still communicate with the OpenZeppelin relayer through the REST API, but you would be calling this plug-in directly. In the case of Stellar, we've used this system for developing a channels plug-in. The channels plug-in allows users to submit transactions in parallel, and the key workflow here is that we use channel accounts for building the transaction. So in this specific plug-in called the channels + +[05:00] plug-in, we manage a pool of channel accounts; the user sends a request, and we acquire a channel account. We sign the user's transaction with that channel account, and then we submit it to the Stellar network with a fee-bump transaction as well, so it's also gasless. So yeah: parallel transaction processing, automatic fee bumping, and this includes an SDK client so that users can send transactions quickly and painlessly; I'll give a demo of that at the end of this anyway. But just to walk through the flow of what happens here: from the client, the user sends a request. The request goes to the relayer, and the relayer invokes the plug-in. The plug-in then acquires a channel account. This channel account is managed by the relayer; I must say all of these accounts are managed by the relayer, but the plug-in is able to lock one and then sign a transaction with that channel account, and that transaction is the one the user sent. We then wrap + +[06:00] that in a fee bump and submit it to the Stellar network. Once the transaction confirms, we release the lock and send the result back to the user. So the system scales by the number of channel accounts we have to offer: if we put 200 channel accounts in the system, you can send 200 transactions in parallel with one user account. And just quickly, on the account pooling system we have here: the plug-in system behind the scenes is using Redis to manage the pool of accounts. An incoming request comes in, and we try to lock an account. If it's available, we can acquire it and process the transaction; if it's not, we try again on the next account. So it's pretty straightforward. And just quickly, on the SDK client itself, this is what it would look like for sending a transaction. We have this channels + +[07:00] client, and right now the base URL is pointing at channels.openzeppelin.com, which isn't live yet; we're still working on getting that live either tomorrow or by Monday. But you can also send these requests to your own self-hosted relayer, right? This doesn't have to go through the managed service that we provide. This plug-in is all open source, and the OpenZeppelin relayer is open source, so anyone can just spin up their own instance and manage their own channel accounts. But we will be offering this as a service and managing it all for you as well. So it's quite simple: set up the client, and to submit a transaction, in this case a Soroban transaction, you just provide the function XDR. An example of the response would look like this: this is the transaction ID for the relayer and the hash for the + +[08:00] transaction itself. So in the case of someone who is running a self-hosted version, they would want to manage the channel accounts.
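+The acquire, sign, fee-bump, release loop just described can be sketched as follows. The Redis key names and lock TTL are assumptions rather than the plugin's actual schema; the fee-bump wrapping, though, uses the standard JS Stellar SDK call:
+
+```typescript
+import Redis from "ioredis";
+import { Keypair, Networks, Transaction, TransactionBuilder } from "@stellar/stellar-sdk";
+
+const redis = new Redis();
+
+// Try to lock one free channel account; NX = only if not already locked,
+// EX = auto-expire the lock as a safety net if a worker crashes.
+async function acquireChannel(accounts: string[]): Promise<string | null> {
+  for (const acct of accounts) {
+    const ok = await redis.set(`channel-lock:${acct}`, "1", "EX", 60, "NX");
+    if (ok === "OK") return acct; // safe to consume this account's sequence number
+  }
+  return null; // all channels busy; caller retries
+}
+
+async function releaseChannel(acct: string): Promise<void> {
+  await redis.del(`channel-lock:${acct}`);
+}
+
+// Wrap the channel-signed transaction so the relayer's fee account pays
+// the fee ("gasless" from the user's point of view).
+function wrapInFeeBump(innerXdr: string, feeSource: Keypair): string {
+  const inner = TransactionBuilder.fromXDR(innerXdr, Networks.TESTNET) as Transaction;
+  const bump = TransactionBuilder.buildFeeBumpTransaction(
+    feeSource,
+    "10000", // fee per operation, in stroops
+    inner,
+    Networks.TESTNET,
+  );
+  bump.sign(feeSource);
+  return bump.toXDR();
+}
+```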
There are methods in the SDK that allow you to list what channel accounts are available on the service and also add new ones. So if you needed to increase the throughput you could add more channel accounts, or if there's some issue you can remove some, etc. So yeah, in a nutshell, all of these things have come together and we're offering a Stellar relayers managed service. This includes the relayer service itself, the channel accounts plugin, and the SDK, but also the cloud infrastructure. So just a bit of a peek into what it takes to run this on the cloud: we are using AWS Fargate for managing a varying number of tasks + +[09:00] running the relayer service, and a shared Redis instance, so we have the same pool of channel accounts across tasks. The client SDK goes up through the Cloudflare proxy, which sends it down to the load balancer, which then distributes it to whichever task is free. So yeah, that's pretty much the overview, and I can give you a quick demo right now. But first: this is just a demo script. We've already seen how you would instantiate the client, and I have a little helper function here for building a Soroban transaction; here we pass the function to the client itself. I just want to point out first, though, that in order to use the service you need to get an API key. You can just call + +[10:00] the domain channels.openzeppelin.com/gen to get an API key. For the sake of this demo, I'm just going to add the API key here and call the demo script. This is going through the staging environment that we have for the managed service right now. Okay, so the transaction is confirmed. Just to double check: yeah, the transaction was successful. And just to demonstrate the parallel processing, I also have a parallel demo. This is sending 10 transactions at the same time with the exact same account that I sent the demo transaction with previously. It's just + +[11:00] one account. And yeah, that's pretty much it. Any questions, let me know. Great. So, I'm a little bit curious about the managed service, because depending on your use case, especially for maybe testing it out, it seems a lot more convenient to just try it out with the managed service. How does that work? How do I get set up with that? So we are finalizing the documentation and the SDK today. Once we have all of that documentation published on our site, your community will be able to access it, and it's a pretty straightforward setup, as you've seen with the demo. [snorts] Yeah. Okay. Great. And if I want to run + +[12:00] this myself (it is open sourced, and I see that I can run it in my own environment), what do I need? I saw you mentioned the Redis database. Is there anything else I need to set up to run it? Yeah, it's a good question. In the documentation we actually have all of this listed out, and we provide in the OpenZeppelin relayer repo an example configuration using Docker. All someone would need to do is go into the repo and run that Docker config, and it would spin up the service with the channels plugin already installed and everything ready to go; all you need to do is start sending requests to it. Redis is required, of course, and you need the OpenZeppelin relayer installed and the various other dependencies, but there are some example configs that get you up and running pretty quickly. Okay, great.
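For self-hosters, the account pooling described earlier (lock an account in Redis, retry on the next one if it is taken) is the part you would be operating. Conceptually it behaves like this sketch; the `RedisLike` interface, key naming, and function names are all illustrative assumptions, since the plugin's actual storage layout is not spelled out in the talk.

```typescript
// Minimal Redis-style interface; only what the sketch needs.
interface RedisLike {
  // SET key value NX PX ttl semantics: returns true only if the key was absent.
  setIfAbsent(key: string, value: string, ttlMs: number): Promise<boolean>;
  del(key: string): Promise<void>;
}

// Tries each channel account in turn until one can be locked.
async function acquireChannelAccount(
  redis: RedisLike,
  accounts: string[], // public keys of the pooled channel accounts
  lockTtlMs = 30_000, // lock expires even if release is never called
): Promise<string | null> {
  for (const account of accounts) {
    const locked = await redis.setIfAbsent(`lock:${account}`, "1", lockTtlMs);
    if (locked) return account; // this account is ours until released
  }
  return null; // every account is busy; caller should retry or queue
}

async function releaseChannelAccount(redis: RedisLike, account: string) {
  await redis.del(`lock:${account}`);
}
```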
Sounds pretty easy, though. + +[13:00] What are the use cases? Who do you think is going to use this the most? Any specific dApp types or use cases you have in mind that you think will adopt this? Yeah, look, I think it's super good for someone who needs high-throughput transaction processing. And the gasless aspect of it is super convenient as well, so users don't have to worry about sequence numbers. So you can see apps like games or various other services that might want to use it. But even for someone who wants to self-host this themselves: it's open source, so you can extend it any way you want. The core primitives are there, which is gaslessness, high throughput, sequence number management, etc., and you can build on top of that whatever you need. Great. I know you + +[14:00] mentioned that the documentation and everything for the managed service is going to be available very soon, but do you have any examples of how to use it? I know you showed one, but do you have a GitHub repo or an example library? So, we have a plug-in repo up right now. Just one second. There are some examples in this repo. I'm just going to share it. Where should I share it? Can you post it as a comment? Okay, comments. I shared it in the private chat, because I don't think I'm logged in. I'll just copy it over. So yeah, there's a pretty extensive overview in the readme of this + +[15:00] repo of how the plug-in system works for the channel accounts. And once the service is live in production, it's only a case of substituting your localhost relayer with the production URL. I'm also going to share the examples from the relayer repo as well; honestly, with just these two things you can get up and running locally pretty quickly. Okay, great. And this is the example in the relayer repo. There's also a readme in that as well, which is extensive and should get people up and running. Yeah, I have actually gone through the readme and taken a + +[16:00] look at it, and it looks very detailed. I just haven't tried to set it up myself yet. Okay. Looking forward to that. Yeah. Well, let me know if you have any feedback. I hope that it's simple, even though I think it is. Yeah. Any questions from anyone watching, feel free to post them in the comments. Carsten, I just posted the documentation, which should be available on docs.openzeppelin.com. So I just sent the link. Yeah, I posted it. Thanks. Great. Well, if there are no comments or questions... let me just check one more time. Doesn't seem to be the case. But it was great to get a little bit more information about the relayer + +[17:00] and to see an example of how to use it. I think it's a super helpful tool, and for now I think I will try using the managed version first, when it goes live. It seems like an easy way for me to test out the tool; I don't necessarily need to spin up my own infrastructure for this. So I'll look forward to having an opportunity to play with it. And thank you everyone for watching, and thank you all three of you for joining today. Please go check out the links and take a look, and yeah, thank you all for joining. Thank you. Okay. Thank you. Bye. Bye. + +
diff --git a/meetings/2025-11-06.mdx b/meetings/2025-11-06.mdx new file mode 100644 index 0000000000..6fcfc91af4 --- /dev/null +++ b/meetings/2025-11-06.mdx @@ -0,0 +1,168 @@ +--- +title: "OpenZeppelin Smart Account, Vault, and RWA" +description: "OpenZeppelin reviews three major Soroban releases—Smart Accounts, Vault, and RWA—showing how programmable authorization, standardized yield vaults, and compliant real-world asset tokens extend Stellar with better UX, composability, and regulatory tooling." +authors: [carsten-jacobsen] +tags: [spotlight, CAP-71] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This session covers OpenZeppelin’s Q3 Soroban releases and how they address common developer challenges around authorization, yield strategies, and compliant asset issuance. The discussion highlights how account abstraction, standardized vault interfaces, and modular compliance layers can be composed together to build safer, more user-friendly Stellar dApps. + +The presenters focus on practical design tradeoffs: abstracting cryptographic complexity behind smart accounts, preventing known economic exploits in vault math, and enabling real-world asset tokens to meet jurisdiction-specific compliance requirements while remaining reusable and extensible. + +### Key Topics + +- Smart Account framework + - Implements account abstraction on Soroban using the custom account interface + - Authorization is defined through up to 15 context rules per account + - Each rule combines signers, scopes, policies, and validity windows + - Supports multiple signer types + - Delegated signers (Soroban accounts/contracts via `require_auth_for_args`) + - External signers routed through verifier contracts (enables new curves without redeploying accounts) + - Policies are modular contracts (stateful or stateless) + - Examples: multisig thresholds, spending limits, subscription windows + - Lifecycle hooks: install, can_enforce, enforce, uninstall + - Enables use cases such as passkey-first wallets, time-limited dApp sessions, automated multisig, bots/AI agents with bounded permissions + - Discussion around naming clarity and the need for more end-to-end demos +- Vault (ERC-4626–style yield interface) + - Standardized API for managed yield strategies + - Deposits mint shares; redemptions burn shares; yield accrues by increasing assets-per-share + - Detailed walkthrough of rounding rules for mint/redeem vs withdraw/deposit + - Explanation of “inflation attacks” via donations + zero-share mints + - Mitigation using decimal offsets and consistent rounding behavior + - Specification merged during the session with contributions from GS Maxi (Sentinel F) +- RWA (Real World Assets) token stack + - Base compliant fungible token with additional controls + - Forced transfers + - Full or partial balance freezing + - Recovery flows for lost keys + - Pausability + - Identity layer + - Claim-based by default (e.g., KYC, residency), capped at 15 topics + - Trusted issuers sign claims + - Designed to be swappable (Merkle- or ZK-based alternatives possible) + - Compliance layer + - Pre- and post-hooks on transfer, mint, and burn + - Multiple compliance modules can observe the same hook + - Designed for reuse across multiple tokens (e.g., shared bank rule sets) + - Access control options + - Simple owner model or granular role-based control + - Extensions + - Document Manager for anchoring off-chain legal documents via hashes + +### Resources + +- [Smart Account 
docs](https://docs.openzeppelin.com/stellar-contracts/accounts/smart-account) +- [Vault docs](https://docs.openzeppelin.com/stellar-contracts/tokens/vault/vault) +- [RWA docs](https://docs.openzeppelin.com/stellar-contracts/tokens/rwa/rwa) +- [CAP-0071](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0071.md) + +
+ Video Transcript + +[00:00] Hello and welcome everyone to this week's Stellar Developer Meeting. With me today I have two guests from OpenZeppelin who have been around here before. What we're going to do today is take a look at what has been added to the OpenZeppelin tool chain and the libraries supporting Stellar over the last quarter. So I'm super excited. I know we're going to talk about smart accounts, we're going to talk about RWAs. So yeah, please introduce yourselves and let's get started. Yeah, hello everyone. It's really a pleasure to participate again at the Stellar Developer Meeting. My name is Boyan. I'm an open source developer at OpenZeppelin, and I'm + +[01:00] building the Stellar contracts library. Hi everyone, it's Özgün here, pleasure to be here. I am also an open source developer at OpenZeppelin. We are a team with Boyan, trying to improve the smart contract ecosystem for Stellar and Soroban. Great, thank you. So we've done quite a few hackathons this year, and it's really great to see that some of the developers are starting to use OpenZeppelin software and the libraries and the tooling. And I think what you're going to show today is going to be really interesting for a lot of developers. So if you have a presentation, you can share. Yes, I will start. Let me show my screen. + +[02:00] [clears throat] Yeah, there it is. Sorry about that. So I'm going to present to you today the [clears throat] smart account framework that we released with the latest release of our library. Just to mention: already a month ago, when we met some of you at Meridian in Rio, I was so excited, and I guess it was the case for everyone, to use passkeys and to have all this + +[03:00] wallet experience abstracted into a smart account. So our ambition is to make this the new normal for Stellar, and we are building this framework with that in mind. So without further ado, let's dive in. First, a point about naming and why we chose smart account and not smart wallet. First, [clears throat] the Soroban SDK defines a custom account and the custom account interface, and our design builds on top of it. That's why we decided to choose smart account as the naming, and a smart account + +[04:00] must implement this interface. Then, why are smart accounts necessary? Let's explore a very simple scenario, where a smart account holds some fungible tokens and the owner of the contract wants to transfer some amount, so they need to call a function on the USDC contract. In this case, they will make use of a cryptographic key that is registered at this smart account to authorize the operation. By looking at it from this perspective, the perspective of account abstraction, we are establishing a boundary between the account that holds the assets and the keys that control them. + +[05:00] Now let's try to define what a smart account is. This is a smart contract first and foremost, one that composes authorization intents coming from multiple sources, and this high-level definition will guide us through all the components of our framework. With this in mind, let's first see what the authorization sources could be.
As in the previous example, an authorization source could be a cryptographic key, or there could be multiple cryptographic keys for the same smart account, or it could be a G account that authorizes on behalf of the smart account, or any combination of these: cryptographic + +[06:00] keys, G accounts, or other smart accounts and smart contracts. The next part of the definition talked about authorization intents, and we will see what the core elements of an authorization intent are. We can examine this by answering three questions: who is allowed to act, what are they allowed to do, and how are those permissions enforced. Who is allowed to act? This might already be clear: those could be keys from different cryptographic curves, and also G accounts and C accounts. What are they allowed to do? This can be literally any kind of action: either a specific function on + +[07:00] a specific contract, or just the transfer function on any SEP-41 compatible contract, and deployments as well. So yeah, any kind of action. How is this enforced? Again, many different combinations are possible. It could be key1 and key2; key1 or key2; or key1 and some other conditions that are not derived from a cryptographic scheme. To give names to those groups: the "who" is the signers, the "what" is the scope, and the "how" is the policies. How do we compose those elements? We have signers, scope, and policies, and we embed them in an entity called a context rule. Here you see that + +[08:00] besides the signers, scope, and policies, we also have the notion of validity. Context rules function like a routing table for authorizations: for every context, they specify the scope and the conditions that must be met before the authorization is granted, and we'll see how this goes very shortly. So a smart account stores a set of those context rules, and we can have up to 15 rules per smart account. We added this limit to ensure reasonable resource consumption and to encourage proactive management, for example to encourage you to remove expired or unused rules. And coming back to this authorization + +[09:00] mechanism, or algorithm, it's actually pretty simple. When authorization from a smart account is required, first we gather and order all non-expired rules, and then we evaluate them starting from the last added one. Now let's zoom into signers. We have two types of signers: delegated and external. A delegated signer can be any Soroban address, G account, or C account. And on the right we group all cryptographic schemes or keys in the external variant; we'll see in a second why we call it external. But first, how does the delegated signer grant an authorization? Codewise it's very simple: we use the built-in + +[10:00] require_auth_for_args function. However, there is a catch in how we actually construct this transaction. I won't dive into it here; you can find more information in the documentation, and there are some code snippets explaining how to get around it. I'll just mention that [CAP-71](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0071.md), which hopefully gets included in one of the future protocol updates, will resolve these issues. So very soon, I hope, it will be a non-issue. The second signer type is the external one, for authorizations coming + +[11:00] from cryptographic keys.
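To make the data model and evaluation order just described more concrete, here is a toy TypeScript sketch. The real framework is a Rust/Soroban library, so every name below (the `Signer` union, `ContextRule`, `authorize`) is invented for illustration; only the structure (signers, scope, policies, validity, evaluate non-expired rules newest-first, cap of 15) comes from the talk.

```typescript
// Illustrative data model only; the real framework is Rust/Soroban.
type Signer =
  | { kind: "delegated"; address: string } // G account or another contract
  | { kind: "external"; verifier: string; key: Uint8Array }; // curve key

interface ContextRule {
  addedAt: number; // used to order rules, newest first
  validUntil: number | null; // null = valid forever
  scope: string; // e.g. a contract address or "any SEP-41 transfer"
  signers: Signer[];
  policies: string[]; // addresses of external policy contracts
}

const MAX_CONTEXT_RULES = 15; // limit mentioned in the talk

function authorize(
  rules: ContextRule[],
  context: string,
  now: number,
  ruleSatisfied: (rule: ContextRule) => boolean, // signer + policy checks
): boolean {
  const candidates = rules
    .filter((r) => r.validUntil === null || r.validUntil > now) // drop expired
    .filter((r) => r.scope === context) // simplistic scope matching
    .sort((a, b) => b.addedAt - a.addedAt); // evaluate last-added first
  return candidates.some(ruleSatisfied);
}
```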
But before we look into that, here is a list of the curves on Stellar: the already available ones that are supported, and BN256, which might get added with Protocol 25. And I guess there could be many more in the future. You remember the structure of the external signer: it's a tuple of an address and bytes. We could have defined its structure by enumerating all available curves, with the [snorts] bytes interpreted according to the curve type. But you would agree that this approach is not very scalable, because for every new curve that is added + +[12:00] you have to upgrade your account if it is to support it, and this is not practical at all. Another drawback of embedding signature verification into the smart account is that its binary size will grow and the code complexity will increase as well. For example, classical Ed25519 verification is just a one-liner, but passkey verification is more involved and requires more code. This pushed us to adopt a far more flexible and scalable approach, which is outsourcing and externalizing all types of signature verification. So we have a verifier contract, which stands behind the address, and the public key, which is the bytes. + +[13:00] This verifier contract is a special one. It is deployed only once per curve type or scheme, it should be immutable, and it shouldn't keep any state; in that way, a single verifier contract can validate signatures for any number of keys. This was made possible thanks to Protocol 23, which made cross-contract calls really cheap. Now let's look into policies. We have here one context rule with the scope, the signers, and the policies. Let's say we have three signers: two cryptographic keys and one G account. The policies modify and customize + +[14:00] how the signers behave. In this case, we require any two of those three signers to sign so that the authorization passes. The policies can also modify other aspects of the flow; in this case, we want the rule to be enforced only once per month, so that you can, for example, pay for a subscription. So policies are external contracts, just as the verifiers are external. Policies can be stateful or stateless, meaning they can keep some state for a specific account and context + +[15:00] rule. They can be shared across many different accounts or be tied to a specific smart account. The other aspect is that they have a four-stage lifecycle, which is visible from the interface those policies must adhere to. Now, if we look at this spending limit policy: let's say it's a stateful policy and it is shared across multiple smart accounts. When this policy is added to a context rule, the install hook is called; it initializes the storage for the calling account and sets the amount and the time period. For example, here we'll have 100 USDC and one month; these are the init params that are passed. Can-enforce + +[16:00] is just a read function that is invoked every time the evaluation loop runs. Enforce, on the other hand, requires authorization from the already installed smart account; it can modify the storage and emit events. Uninstall is called when the policy is removed, so we clean up the policy storage. So install and uninstall are used only once, can-enforce runs on every evaluation loop, and enforce runs when a context rule is matched.
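The four-stage lifecycle can be sketched as a small interface. Again, the actual policies are Rust/Soroban contracts; the TypeScript names below are invented, and the "once per period" policy is a simplified stand-in for the subscription example from the talk.

```typescript
// Illustrative model of the four-stage policy lifecycle.
interface Policy<Params, State> {
  install(account: string, params: Params): State; // once, when added to a rule
  canEnforce(account: string, state: State, now: number): boolean; // read-only, every evaluation
  enforce(account: string, state: State, now: number): State; // when the rule matches
  uninstall(account: string): void; // once, when removed
}

interface SubscriptionState {
  periodMs: number; // e.g. one month
  nextAt: number; // earliest time the rule may match again
}

// A stateful "once per period" policy: the context rule it guards can only
// be enforced once per period, like a monthly subscription charge.
const oncePerPeriod: Policy<{ periodMs: number }, SubscriptionState> = {
  install: (_account, { periodMs }) => ({ periodMs, nextAt: 0 }),
  canEnforce: (_account, state, now) => now >= state.nextAt,
  enforce: (_account, state, now) => ({ ...state, nextAt: now + state.periodMs }),
  uninstall: (_account) => {
    /* clean up this account's policy storage */
  },
};
```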
So these are the main elements of a smart account. To sum it up, we can have up to 15 context rules per smart account, and each context rule contains signers and policies. + +[17:00] So let's see some use cases. The most trivial one is multisig, threshold-based or weighted. With this setup we can also have time-limited app sessions: for example, you install a context rule that is specific to a particular app and is valid for just one day, so you can interact with this app without needing to approve any further transactions. You can add, for example, the keys of an AI agent or bot as a context rule and allow it to spend a limited amount of your assets, so it can trade, for example, and you don't risk getting wrecked. Or + +[18:00] you can subscribe to some services. Here, let's see how the user flow for a subscription would happen. I have a smart account, and I have the sudo rule, the super admin rule, which has a single signer, my passkey; there are no policies and it's valid forever. Then I go to some dApp and I want to subscribe to a service. The dApp prompts me to install a context rule whose scope is the USDC contract, because I'm going to pay in USDC. The signer is the dApp's public key. And the policy is some spending limit + +[19:00] policy that limits the spending to up to 20 bucks per month, and I'm subscribing for one year. So this is the single transaction I have to do in order to subscribe to this service. From this moment on, the dApp developer can, for example, use the OpenZeppelin monitors to subscribe to the install event on this policy, and when this gets triggered, they will, with the OpenZeppelin relayer, charge the user's smart account 20 bucks every month. So the key takeaways: this is a context-centric framework. I compare it very + +[20:00] often with the web2 login experience: when you are logging in with your Google account or your GitHub account, you authorize the third party with some specific permissions. You grant a scoped permission; you don't grant access to all your data, just very scoped permissions. So we are composing authorization intents through those context rules, which contain signers and policies. Another very important aspect is that signature verification is not hardcoded into the smart account, so it is very flexible and very scalable, and this framework enables programmable authorization in that + +[21:00] manner. So yeah, if you want to play with it, I'm inviting you to go to our repo and check our docs. And I'm pausing here for questions, if there are any. Great. It was really interesting. We have seen a lot of interest in using passkeys, and there are so many capabilities in Soroban for authorization. But it's also complicated; it's not straightforward and easy. I think the wealth of opportunities to create custom policies and authorize your users in different ways is great, but it's also a little bit complicated. So the framework you present here makes that a lot easier. I + +[22:00] think this will be a great way to add flexible authorization to your dApp. So very interesting to see. I think I'll drop the link to the repo or to the documentation.
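One of the takeaways above is that signature verification is externalized into per-curve verifier contracts. A toy model of that dispatch, with invented names standing in for the cross-contract call, might look like this:

```typescript
// One stateless verifier per curve; the account stores only
// (verifier address, public key bytes). All names are illustrative.
interface Verifier {
  verify(publicKey: Uint8Array, payload: Uint8Array, sig: Uint8Array): boolean;
}

// Stand-in registry for "cross-contract call to the verifier at <address>".
const verifiers = new Map<string, Verifier>();

interface ExternalSigner {
  verifierAddress: string; // which curve's verifier to call
  publicKey: Uint8Array; // opaque bytes, interpreted by that verifier
}

function checkExternalSignature(
  signer: ExternalSigner,
  payload: Uint8Array,
  sig: Uint8Array,
): boolean {
  const verifier = verifiers.get(signer.verifierAddress);
  if (!verifier) throw new Error("unknown verifier contract");
  // Supporting a new curve means deploying one new verifier contract;
  // existing smart accounts never need to be upgraded.
  return verifier.verify(signer.publicKey, payload, sig);
}
```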
If anyone has any questions... yeah, Elliot has a question here. Yeah: did you say that the OZ Monitor can be used to index and track additions and removals? Yeah, definitely. There are some events that you can subscribe to, on the policies or on your smart account, and trigger some actions according to whatever you want to do. One naming concern: smart account + +[23:00] will likely overlap with Stellar accounts and could confuse the idea that this is a contract. Mhm. Yeah. I presented why we went with smart account. You mention this may seem a better fit for those kinds of entities; by Stellar accounts you mean the classic Stellar accounts, I suppose. Yeah, we have to give it a thought, but I think it can + +[24:00] be distinguished well enough so as not to be confused. It would be great to include a 15-minute demo of building an actual dApp. Great suggestion. [laughter] I'm taking it. Yeah. Okay, then I'm passing it to Özgün. Right. Hi again everyone. I'm going to be presenting the Vault, and also RWA. So let me share the screen. Let's do entire screen. Please tell me if it's successful, because I don't see the streaming tab right now. Yep, we see your presentation. + +[25:00] Great. Let's try slideshow. So, what is it that I'm going to be talking about? It's a uniform interface for integrating with various yield-generating strategies, also known as ERC-4626 if you are familiar with the Ethereum ecosystem. The problem the Vault is solving: let's say you have a thousand USDC; what are you going to do with it? You can lend it, provide liquidity, or stake it, right? Those are your options, or you can have a manager of your funds who utilizes the best option out there. But for every manager you will have + +[26:00] different problems. There is no composability, and the UX in general would be bad. So the Vault tries to standardize all of this, and it still can be managed. This is what we are trying to do. You deposit your assets into the vault, the vault tries to utilize your assets in the best way and earn some interest, and you get shares in return. And you can always convert between shares and assets; they are interchangeable. So how do we calculate shares and assets? Let's say you deposit your assets and you will get shares. The basic formula for that: we + +[27:00] multiply the deposited assets by the total shares currently in the vault, then we divide that result by the total assets in the vault, and the result will be your shares. This will be very important. So now I'm going to do some basic math, but it's very basic, so don't be scared. First action: say Alice deposits 100 USDC into the vault. So the deposited assets are 100, total shares are again 100, and total assets are 100. Now let's say 10% yield is earned, and the asset amount per share has increased. Previously one asset corresponded to one share, but + +[28:00] right now, since the total shares have not changed and only the interest increased the total asset amount in the vault, this ratio changes: if you provide one share, you get 1.1 assets in return, because of the 10% interest. Now say Bob deposits 300 USDC; we do this calculation and Bob will get 272.73 shares in return. So it's not a one-to-one mapping as it was for Alice.
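The share formula and the Alice/Bob walkthrough can be reproduced in a few lines of integer math. This is a minimal sketch of the conversion described in the talk, not the library's actual implementation; BigInt mirrors on-chain integer arithmetic, which truncates where the talk quotes 272.73.

```typescript
// shares = depositedAssets * totalShares / totalAssets, with integer
// (truncating) division, as on-chain math would do.
function convertToShares(
  assets: bigint,
  totalAssets: bigint,
  totalShares: bigint,
): bigint {
  if (totalShares === 0n) return assets; // empty vault: 1:1 bootstrap (simplified)
  return (assets * totalShares) / totalAssets; // rounds down, favoring the vault
}

// Alice deposits 100 into an empty vault -> 100 shares.
let totalAssets = 100n;
const totalShares = 100n;

// 10% yield accrues: assets grow, shares do not.
totalAssets = 110n;

// Bob deposits 300 -> 300 * 100 / 110 = 272 shares (272.72... truncated).
console.log(convertToShares(300n, totalAssets, totalShares)); // 272n
```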
That's because 10% yield had already been earned. And this is still fine, because if you multiply Bob's shares by 1.1 you get 300, so it is + +[29:00] still fair. Let's say another 10% of interest accrues, and now Charlie deposits 200, and the amount of shares would be about 165. The problem in this previous slide is that we have these decimals, right? This is not an integer; it's a fractional number, and that's problematic, because how do you handle 3.33 shares, for example? You cannot. So for shares we round up or down. When do we round down, and when do we round up? When you are minting or depositing, we round down your number of shares, so you get slightly fewer shares than the exact ratio. The reason is the + +[30:00] vault cannot overmint shares: we have to protect the vault, not the users, so that attacks won't happen. This rounding down goes for minting and depositing, and the opposite one, rounding up, goes for redeem and withdraw operations, again in favor of the vault. Which creates another problem, which I will demonstrate right now: the inflation attack. There are a couple of versions of it. Let's say the attacker deposits one stroop, so they minted a share, and right now one share corresponds to one stroop only. Then, and the keyword here is donates, not deposits: since it's not a deposit, it only increases the total asset amount, + +[31:00] not the shares. It's basically: I'm just giving away this money to the vault and I'm not expecting any shares in return. Why would someone do this? You will see the reason. If you convert the USDC to stroops, you know there are seven decimal places in Stellar, so after 10,000 we add seven more zeros, and then we add the previous one stroop; that gives us the number for total assets. Now Alice deposits a thousand stroops. If you divide a thousand stroops by this total asset amount, you get something really small, like 0.00001 or so, and this will ultimately be zero because of the rounding down that I explained previously. So Alice deposited some money, which was non-zero, a + +[32:00] thousand stroops, but in return she got zero shares, and if the attacker redeems their shares, they get all the money, basically stealing Alice's stroops. The other scenario: the first two steps are the same, the attacker deposits one stroop and then donates 10,000 USDC, and after that, say, Alice deposits 10,000 USDC. That's almost half of the total assets, but only one stroop short of it, so if you do the calculation it comes out as something like 0.99999. But since we don't have any precision for the decimal points, it will again be rounded down to zero, and the attacker would steal all of Alice's money, 10,000 USDC, + +[33:00] due to this rounding-down behavior. So this is the inflation attack. It is not specific to our implementation; this attack has existed across all ecosystems, and fortunately we have a solution for it. The most common one is the decimal offset. Instead of the straightforward calculation, we also add 10 to the power of an offset, and it looks something like this: if you deposit one stroop, instead of one share you get a larger amount of shares, depending on your decimal offset. Say the offset is nine: you will get that amount of shares. Now let's run the same attack again.
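The attack numbers and the decimal-offset mitigation can be checked directly. The sketch below uses the "virtual shares and one virtual asset" formulation of the decimal offset; that exact formulation is an assumption on my part, since the talk only names the technique, but it reproduces the behavior described.

```typescript
// Decimal offset: the vault behaves as if it held 10^offset extra shares
// and 1 extra asset, so tiny deposits no longer truncate to zero shares.
function sharesWithOffset(
  assets: bigint,
  totalAssets: bigint,
  totalShares: bigint,
  offset: bigint,
): bigint {
  const virtualShares = totalShares + 10n ** offset;
  const virtualAssets = totalAssets + 1n;
  return (assets * virtualShares) / virtualAssets;
}

// Attack setup: attacker deposits 1 stroop, then *donates* 10,000 USDC
// (10^4 * 10^7 stroops; Stellar amounts use 7 decimal places).
const donated = 10_000n * 10_000_000n;
const totalAssets = 1n + donated; // the donation raises assets, not shares
const totalShares = 1n;

// Without the offset, Alice's 1,000-stroop deposit truncates to 0 shares:
console.log((1_000n * totalShares) / totalAssets); // 0n

// With a decimal offset of 9 she receives a nonzero share amount:
console.log(sharesWithOffset(1_000n, totalAssets, totalShares, 9n)); // ~10n
```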
The attacker deposits one stroop and then donates 10,000 USDC; + +[34:00] they won't get any shares for it, because it's a donation. Then Alice deposits 10,000 stroops. That would have been a tiny fraction, truncated to zero, if we didn't use the decimal offset. But since we are using the decimal offset, Alice will get at least one share, and the attack won't work. So basically there is no attack, thanks to this decimal offset. Any questions about the Vault? I will stop presenting, and we'll move on to RWA after questions. By the way, maybe we can also share a link for the Vault implementation. Yeah, and maybe we also want to shout out someone who contributed to the + +[35:00] implementation. I don't know if he's on the call: GS Maxi from Sentinel F. He helped with this implementation and laid the groundwork, and he also helped us write the SEP for the Vault, which just got merged today, by the way. So we have a new SEP for the Vault, guys. All right, great. Then I can continue with RWA. For this one, I'm going to be presenting over some text instead of slides. Okay, is it readable, or should I make it bigger? Maybe just slightly bigger. Yeah, I + +[36:00] think this is good. Great. So RWA, as you know, stands for real-world assets, and it's more a suite of contracts than a single contract, so it's more complicated and involved. The base RWA token you can think of as a fungible token: it has the same metadata and core functions. But on top of that we have forced transfers, where the admins can force transfers on behalf of the users. We have freezing, both address-level and partial. Partial means we can freeze part of your wallet balance; for example, maybe you have 2,000 in your balance and we only froze 500 of it. Address-level means your + +[37:00] address is frozen for every transfer, mint, etc. operation, so basically you are on hold. We also provide recovery mechanisms in RWA, in case you lose your private key: the authorities can recover your wallet by sending your balance to another wallet, along with all of your account-related information. For this recovery, of course, you have to convince the authorities by providing the necessary documentation; that's not part of the RWA standard, but it's how it would work behind the scenes. And you probably already know about pausable operations from the utilities for other tokens; it's the same for the RWA token: we can pause the operations of the contract. So this base token contract is going to + +[38:00] be interacting with two modules, or stacks. One of them is identity, the other is compliance. The identity stack is responsible for verifying your identity, basically, but it's quite involved, and I will get into details later. The other one is compliance. This is mostly about hooks, post- and pre-hooks for the operations, and it exists mostly to allow organizations, governments, etc. to embed their own business logic into transfer, mint, and burn operations. So let's go step by step. The RWA token, as I told you, is the main contract, and it + +[39:00] communicates with compliance and the identity verifier. The compliance manages five hook types, which I will dive deeper into. And the good thing about compliance and the identity verifier is that they have multi-token support, which means that one compliance contract can be shared by multiple RWA tokens.
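Before moving on to how those two stacks plug in, here is an illustrative surface for the base-token controls just described (forced transfer, full and partial freezes, recovery, pause). The real contract is Rust/Soroban; every name in this interface is invented for the sketch.

```typescript
// Illustrative surface of the base RWA token controls; names are invented.
interface RwaToken {
  // Standard fungible surface (name, symbol, decimals, transfer...) plus:
  forcedTransfer(admin: string, from: string, to: string, amount: bigint): void;
  setAddressFrozen(admin: string, account: string, frozen: boolean): void; // full freeze
  freezePartialTokens(admin: string, account: string, amount: bigint): void; // e.g. 500 of 2,000
  recoverAccount(admin: string, lostWallet: string, newWallet: string): void; // key-loss recovery
  pause(admin: string): void; // halt transfer/mint/burn for the whole contract
  unpause(admin: string): void;
}
```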
So if there is another RWA token, it can also go to the same compliance contract. I will give an example, which I like: let's say you have bank A and bank B in your country. Since they are in the same country, the compliance rules they need to adhere to will probably be the same, so they can use the same compliance contract in this case. The identity verifier, again, can be shared across multiple tokens. It validates the identity of the + +[40:00] investors in this token; I will dive into it later, so let's skip it for now. If you go to the high-level basics for the token, you need to provide the name, symbol, decimals, and initial supply, very standard stuff, same as the fungible token. On top of the fungible functionality we have the freezing mechanisms, a recovery mechanism, forced transfers, and pausable operations, as I told you. I already talked about how the compliance and identity stacks can be shared, so I'm going to skip that as well. Let's dive into the identity stack. There can be multiple ways to verify identity. The one we provide, the default one, is claim-based. + +[41:00] You can think of it like this: I'm from Turkey and I'm a Turkish citizen, and in this RWA setting maybe residency is an issue, so I must have a claim from the authorities that I'm a resident of Turkey, a residency claim issued for me by the authorities. One downside of this approach is that if it is not encrypted, then, since this is on-chain data, all the data can be seen by everyone. So it is very useful, but if you care about privacy it may not be the best solution. There are other approaches as well: you can do a Merkle tree, you can do zk-email, you can do your own custom approach. We do not provide these, by the way, but they are certainly doable, and we + +[42:00] designed the architecture in such a way that this coupling is not tight but loose, so you can easily unplug this part and plug in your own solution for the identity stack. So what do we provide for the default identity? We provide claim topics; these will be know-your-customer, anti-money-laundering, etc. We limited the topics to at most 15. We also have trusted issuers. These are all separate contracts, by the way, and the trusted issuers are able to sign claims on behalf of + +[43:00] the users, so that the users can use these claims. We also store information regarding the identities, like whether they are individuals or organizations, which country they are based in, etc. So the identity stack manages all the claims and topics and also the account-related information. Now the compliance one. These are the hooks; this is much more straightforward. We have two pre-hooks and three post-hooks: transferred, created, and destroyed. I will start with the pre-hooks. Say your organization needs to run additional business logic for transfers: before any transfer operation + +[44:00] happens, you want to run some custom business logic. You can do that. We have a compliance contract which will call this hook when a transfer is about to happen. And for this hook to work, you can have your own contracts; we will call them modules. [snorts] Let's say you have a module for the can-transfer hook, wired up roughly as in the sketch below.
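Conceptually, the hook dispatch described here behaves like the following sketch: several independent module contracts can be registered per hook, pre-hooks can veto an operation, and post-hooks just observe. The hook names match the talk; everything else is an invented stand-in for the on-chain contracts.

```typescript
type PreHook = "canTransfer" | "canCreate";
type PostHook = "transferred" | "created" | "destroyed";

interface Op {
  from?: string;
  to?: string;
  amount: bigint;
}

class Compliance {
  private pre = new Map<PreHook, Array<(op: Op) => boolean>>();
  private post = new Map<PostHook, Array<(op: Op) => void>>();

  // Any number of module "contracts" can be attached to the same hook.
  addPreModule(hook: PreHook, module: (op: Op) => boolean) {
    const list = this.pre.get(hook) ?? [];
    list.push(module);
    this.pre.set(hook, list);
  }

  addPostModule(hook: PostHook, module: (op: Op) => void) {
    const list = this.post.get(hook) ?? [];
    list.push(module);
    this.post.set(hook, list);
  }

  // The token calls this before an operation; every module must agree.
  check(hook: PreHook, op: Op): boolean {
    return (this.pre.get(hook) ?? []).every((m) => m(op));
  }

  // The token calls this after the operation succeeded.
  notify(hook: PostHook, op: Op) {
    for (const m of this.post.get(hook) ?? []) m(op);
  }
}
```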
What you would do is link the compliance contract to your can-transfer module contract, and the compliance contract will call your module every time a transfer is about to happen. The good thing is you can deploy many contracts per hook. For example, you can have three contracts that are called for can-transfer but five for can-create, which + +[45:00] is for minting; this is completely up to you. We decided to go with multiple contracts for auditability and separation of concerns. If you just wrote one piece of logic into can-transfer, and then wrote another piece of logic into the same can-transfer in pure code, it would be cumbersome to manage and debug. That's why separating all business logic into its own contracts, called from a proxy-style contract, is a better architecture. The same goes for the post-hooks: you can have additional logic for all of these. And that summarizes the compliance module. So what about access control? You have a few options: you can go with the ownership model or role-based access control; it's completely up to you. + +[46:00] For each kind of operation you can assign roles and have a very complex role-based access control architecture, or a very simple one; again, completely up to you. We currently provide one extension for RWA, which is the document manager. [clears throat] It enables you to attach legal documents, etc. to your token. Of course, we do not store the whole document on-chain; that would be inefficient. We utilize the hash. And you can also add your own extensions to RWA. That will be it for RWA. If you have any questions, feel free to ask. I see one question: is the RWA design based on any existing standards? Yeah, of course. We worked with Tokeny and the ERC-3643 + +[47:00] association, in collaboration with Stellar as well. All these companies came together, and based on the RWA proposal for the Ethereum ecosystem, or the general ecosystem, we made it specific to Stellar. We made a few deviations from the original proposal, and most of our proposals to deviate were welcomed and will be adopted in the original standard as well. So we also contributed to the maturity of the original proposal. It's not designed from scratch, if that's what you are asking. Okay, great. These were three really interesting presentations, and I think they're very aligned with what we see developers are interested in doing: + +[48:00] the passkey integration and using that for authorization is something there's a very big focus on, and I think the framework you presented is going to help a lot of developers create robust authorization using passkeys. The Vault is something we've seen increased interest in; a lot of developers want to build in a way to earn yield on the tokens that users are holding in their wallets, so very interesting as well. And RWA is also super aligned with what SDF is interested in. So I think this really hits right where the focus is from the developers' side today. Super interesting, and I think we did post all of the links, but otherwise go to `openzeppelin.com` + +[49:00] and you can also see all the documentation there. Let's see, there was one more comment. Yeah, I don't think there are any other questions. This is the last chance if anyone has a question. But thank you both for joining today. It's super interesting to see all the work you're doing.
And personally, I think it's super interesting to see some of the libraries you're building, some of the frameworks and some of the tooling, and then, when we go out to hackathons and talk to developers, to see that they're being used. I think that's the greatest part of it. So thank you everyone for joining, and thank you two for being on here again. I'm sure we will talk again sometime soon and follow up on the latest developments. But thank you for joining everyone; we'll see you next week. + +[50:00] Thanks for having us. See you. + +
diff --git a/meetings/2026-01-22.mdx b/meetings/2026-01-22.mdx new file mode 100644 index 0000000000..3d72642d06 --- /dev/null +++ b/meetings/2026-01-22.mdx @@ -0,0 +1,207 @@ +--- +title: "Soroban Library Releases Plus Fixed-Point Math, Fee Abstraction, and Timelock Proposals" +description: "The OpenZeppelin team recaps their Q4 Soroban library releases and how they improve financial math, transaction UX, and on-chain governance. The session also covers current protocol governance discussions, including proposals for emergency protections and network resource changes." +authors: [carsten-jacobsen, dmytro-kozhevin] +tags: + - developer + - spotlight + - CAP-77 + - CAP-78 + - CAP-79 +--- + +import YouTube from "@site/src/components/YouTube"; + +## OpenZeppelin Q4 Releases: WAD Math, Fee Abstraction, and Timelock Governance {#part-1} + + + +OpenZeppelin recaps its Q4 Soroban library releases: the new `WAD` fixed-point decimal type for high-precision financial math, a fee abstraction module that lets users pay transaction fees in non-XLM tokens, and the first building block of the governance package, a timelock contract for enforcing execution delays. + +### Key Topics + +- New `WAD` fixed-point decimal type (18 decimals) for deterministic, high-precision arithmetic in Soroban smart contracts +- Ergonomic Rust API: operator overloading, safe conversions (token amounts/prices), and checked math variants for overflow/div-by-zero +- Fee Abstraction module: lets users pay transaction fees in non-XLM tokens via a relay + fee-forwarder contract flow (useful for classic accounts; “missing piece” for smart accounts) +- Two fee-forwarder flavors: permissionless (trustless) and permissioned (owner-controlled allowed tokens/relayers) +- Governance package kickoff with a timelock contract to enforce execution delays for safer upgrades and exits + +### Resources + +- [OpenZeppelin Stellar Contracts](https://github.com/OpenZeppelin/stellar-contracts) + +
+ Video Transcript + +[00:00] Hello and welcome to this week's Stellar developer meeting. Today we have the OpenZeppelin team here, or at least a part of it, to go through some of the library releases from Q4. 2025 was a really exciting year for the OpenZeppelin and Stellar collaboration: a lot of new great tooling, a lot of great libraries, and we even had the opportunity to participate in some events with you two guys, Meridian and Istanbul Blockchain Week. It was great to see you and have your support at our events. So, yeah, let's get into it. Maybe, Boyan, you can start telling us a bit about what was released in Q4 and have a look back at the initiatives. Yeah, thank you, Carsten. It's always a pleasure to participate in those dev calls. Yeah, 2025 was a busy + +[01:00] year; we shipped a lot of features to the library, and you can expect that we keep the same momentum in 2026. We're going to present to you today three new modules that we added to the library: the WAD addition to the fixed-point math library, which will be presented shortly by Özgün; the fee abstraction module, which is also something very important; and the beginning of the governance package, that is, the timelock. So, yeah, I am going to pass it to Özgün. + +[02:00] Thank you, I'm going to start immediately. Let's share the entire screen. I think it is visible, right? Yep, it is. Great. So today the first topic is WAD. It's high-precision decimal arithmetic for Soroban smart contracts. The reason for it, or the need for it: why are we not going with integers or floating points? Integers don't have decimals, so, for example, this computation will result in zero instead of 0.5. And for floating points there are quite reasonable grounds for not using them in blockchain: in blockchain the code needs to run on every computer, right? + +[03:00] Some people have macOS, some people have Linux, some people have Windows, and across platforms the behavior of floating points changes. We need something deterministic in a blockchain setting, for safety. So, what is it? It is a fixed-point decimal system that uses 18 decimals. So, for example, one will be represented like this: there are 18 zeros here. Similarly, you can see the other representations.
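The two motivating points, integer division losing the fraction and the 18-decimal representation, can be shown in a couple of lines. A minimal sketch (BigInt here simply stands in for on-chain integer math):

```typescript
// The value 1 is stored as 10^18; all arithmetic stays in integers.
const WAD = 10n ** 18n;

// Plain integer division loses the fraction entirely:
console.log(1n / 2n); // 0n, the "results in zero instead of 0.5" problem

// The same division in WAD representation keeps 18 decimals of precision:
const half = (1n * WAD) / 2n;
console.log(half); // 500000000000000000n, i.e. 0.5
```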
So the need is for this decimal point system, so that financial systems, or whatever your use case is that utilizes decimals, can work on a blockchain with deterministic behavior. + +[04:00] I'm going to dive a little bit, only a little bit, into the technical details. Since this representation can fit in the i128 Rust type, we could have used a type alias, but we preferred to go with the newtype pattern instead: we wrapped this internal native type in our custom type. This allowed us to do the following: we could do custom operator overloading for this type, we could do conversions from and into other types, and we could also write custom functions for this custom type. If you went with a type alias, you would just have the methods available on i128, which would be limiting and, in fact, + +[05:00] maybe misleading. So let's go over operator overloading first. If you say this price and this fee are WADs, you can just do addition, multiplication, and division on them; you don't need to call specific functions. This is for your developer experience: you can just write regular Rust code and it will work. We also have operator overloading for cross types, like multiplying a regular number with a WAD and vice versa, and you can also do division. Now, since this is a fixed-point math library, there is a limit to the precision, and if you go beyond this precision, as in every fixed-point math + +[06:00] library, we need to do either truncation or rounding. We went with truncation, and the reason for that is to be predictable: whatever method you are going to use, it will truncate. This is also conservative in terms of the smart contract: the benefit lands on the smart contract end, not the user end, which is what we want in a blockchain setting; there is no further additional logic going on, and it is fast. So this is how we did the operator overloading for some cases. For addition, this is just regular addition, so we didn't do much. But for multiplication, we need to divide the result by the scale; we need to scale it down. The reason is that the first and the second multiplicands, the left-hand side + +[07:00] and the right-hand side, both carry these
So you don't have to think about this library already has it, and you can just do multiplication and division and you don't have to think about scaling, They don't have to think about scaling. The reason I'm explaining it is to explain this is already handled, so you don't. have to think about it. We also covered. Exponential for it. This is basic, exponent, So the exponent part needs to be integer, So the base part can be decimal and w, So the base part can be decimal and w, So the base part can be decimal and w. this is fine, And let me also type in this is fine, And let me also type in: this is fine. And let me also type in here: in Sloan SDK. You can already have Sorry, you already have this, So you + +[08:00] Sorry, you already have this. So you are sorry, you already have this. So you can take power: integer to integer. This is working already. What we achieved by what representation and power function, is. you can do decimal to the power of is. you can do decimal to the power of is. you can do decimal to the power of integer. Right now. We didn't do decimal to the power of decimal. This is for later milestones, but right now, this is achieved and this covers from what I research more than 90% of the use cases, even for financial setting, And we can also touch a little bit on, and we can also touch a little bit on, and we can also touch a little bit on phantom overflow. So you remember from here we are multiplication two wads. together And there can be a potential scenario. This multiplication will overflow out of I 128 type, So it will overflow out of I 128 type. So it will overflow out of I 128 type, So it will be larger than I 128. In these cases we be larger than I 128. In these cases, we be larger than I 128. In these cases, we automatically scale up to I 256. + +[09:00] Automatically scale up to I 256. Then we do the division. Then, if it's, fitting, we convert it back to I 128. If fitting, we convert it back to I 128. If fitting, we convert it back to I 128. If not, we result with an overflow. So this, phantom overflow is also handled for both power and multiplication. And you won't see confusing, conversions, conversions, conversions like, for example, let's say you want to convert five to what, but what should this representation mean? Is it the row value five? Like you will get this. The smaller it's the smallest amount, possible. Possible. Possible five. Or you should get the scaled version five. Like this is unclear from this representation right. So that's why we didn't do ambiguous from and into. default conversions. Because the intention is not clear here. Instead, + +[10:00] Intention is not clear here. Instead, we have from integer and from row and we also have from token amount and to token amount. So I'm going to go with this amount. So I'm going to go with these examples right now. So let's say you have a token, you have an USDA amount of that much and this corresponds to 1.5 because in USDA you have six decimal, points. So this part is on the decimal part. So if you say from token amount and you provide this amount and you also, provide the decimal point. It will correctly, correctly, correctly scale your version to W A and you will scale your version to W A and you will scale your version to W A and you will get an accurate representation in W A, and similarly the vice versa conversion. is also safe and accurate. You will. If it is also safe and accurate, You will. If it is also safe and accurate, You will. If you convert WAD to token amount, you will. 
get the corresponding token amount, correctly, correctly, correctly. So I'm going to shortly touch base on. the API reference. Here are the. constructors we have from integer, from ratio, from token amount, from price and + +[11:00] Ratio from token amount from price and from row. So let's say you convert a from row. So let's say you convert an integer five, then you will get five sad like full, Pi five, not.0000 five. But you will get the actual five, But if you go from row u, then what you. But if you go from row u, then what you. But if you go from row u, then what you provide will be the scaled down. So in order to get one, you need to provide 10 order. To get one, you need to provide 10 order. To get one, you need to provide 10 to the^ of 18 for the row Representation, representation, representation- We have also converters to integer to. token, amount and row. We have arithmetic operators, like addition, subtraction, multiplication, what by what? And multiplication, what by integer? And multiplication, integer by what are all covered in operator, Overloading: We also have division and division by integer, and we have negation, negation, negation. Ah, one point to say about this: these + +[12:00] Ah, one point to say about this. These, are behaving exactly as in the rust, operators. So if you get an overflow, you will get an overflow, and it will panic, as in base rust operations. So we try to, as in base rust operations, So we try to as in base rust operations. So we try to follow the rust convention com, Conventions here to eliminate confusion. If you want to be safe, these are the. If you want to be safe, these are the If you want to be safe. These are the checked variants. Again, as in native rust, rust, rust. We also have utility methods, the. Furthermore, we also have utility methods: the absolute minimum maximum and power. And for these checked versions, and errors, you have the following errors, defined which is overflow, or division by zero, zero, zero. So, if there are any questions, I can. take them, but right now, since I take them. But right now, since I'm sharing the screen, I can't see the screen. So I'm going to wait for a minute, minute, minute. And yeah, based on the input, I can stop. And yeah, based on the input, I can stop. And yeah, based on the input, I can stop. Sharing, sharing, sharing, AT&T. Yeah, it doesn't seem like we have any AT&T. Yeah, it doesn't seem like we have any AT&T. Yeah, it doesn't seem like we have any questions at this point. + +[13:00] Questions at this point. AT&T. Okay, thank you, and I can hand it over to boy. Thank you, and I can hand it over to boy. Then was good, yeah, yeah, yeah. So yeah, I'm going to present you the. fee abstraction module. First, what is fee abstraction? This is a mechanism that enables users to pay for, transactions with some tokens, that are are are instead of native XLM. So for, transactions you pay in XLM, which is the regular way of doing it, and this module fee abstraction is allowing users to pay. for transaction cost with other, tokens, tokens, tokens. What are the benefits for? We can see it from different. perspectives depending on who is. + +[14:00] Perspectives, depending on who is transacting. So for classic accounts this provides another option to paying fees. So it's just a better user experience for classic accounts. But for smart accounts. This is something like a. a game changing The missing piece. 
+Maybe you are not familiar with smart accounts: at the beginning of November we attended another dev call where we presented smart accounts, so I invite you to watch that
+
+[15:00] presentation if you want to get familiar with smart accounts and the framework we are proposing. Basically, smart accounts are contracts, and contracts cannot initiate transactions; they need the assistance of another account to do so. We call this account a relayer. So smart accounts and relayers need some way to understand each other; they need a protocol, some framework through which they communicate and settle on paying fees, and this is what the fee abstraction provides in this case. What are the core elements of this module, as a general mental model? We
+
+[16:00] have a user (by "user" I mean any kind of account, a smart account or a classic account) holding some USDC, and they want to call a function on a target contract. And we have the relayer, which holds XLM. There is an off-chain component of interaction between them: the user makes a request, the relayer responds with a quote, and if they agree, the settlement happens on-chain in a contract that we are going to call the fee forwarder contract. What is the user flow? This is the off-chain
+
+[17:00] component of this model. The user sends to the relayer (assuming there is an API the relayer exposes) a request containing the address they want to invoke, the target function, the arguments, and the token in which they are willing to pay the transaction fee. The relayer then returns a max amount, meaning the price they are willing to accept for this transaction, and the XDR that needs to be signed by the user. The user receives this, and if they are okay with it, they sign this invocation, but they also sign a fungible approval, and we are going to see in a moment how that plays out. They sign it and return it to the relayer. And now we move to the on-chain
+
+[18:00] flow with this signed XDR. The relayer also signs it as the source account, submits it on-chain to the fee forwarder contract, and pays the native XLM transaction fee. Within the fee forwarder contract, the token fee is transferred from the user to the relayer.
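+
+The settlement step lends itself to a short sketch. The following Soroban-style Rust is illustrative only: the contract name, function name, and parameters are assumptions made for this example, not the actual OpenZeppelin interface.
+
+```rust
+#![no_std]
+use soroban_sdk::{contract, contractimpl, token, Address, Env, Symbol, Val, Vec};
+
+#[contract]
+pub struct FeeForwarder;
+
+#[contractimpl]
+impl FeeForwarder {
+    /// The relayer submits this as the transaction's source account and pays
+    /// the XLM fee; the user's signed authorization covers the token transfer.
+    pub fn forward(
+        env: Env,
+        user: Address,
+        relayer: Address,
+        fee_token: Address,
+        fee_amount: i128,
+        target: Address,
+        func: Symbol,
+        args: Vec<Val>,
+    ) -> Val {
+        // Settle the off-chain quote: pay the relayer in the agreed token.
+        token::Client::new(&env, &fee_token).transfer(&user, &relayer, &fee_amount);
+        // Then invoke the target contract, all within one atomic transaction.
+        env.invoke_contract::<Val>(&target, &func, args)
+    }
+}
+```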
+This fee will cover the transaction fee, but it might also contain some profit for the relayer, which is doing useful work. Then, after the fee is paid, the fee
+
+[19:00] forwarder contract invokes the target contract with the signed authorization, and this produces the desired effect for the end user. This whole flow is a single transaction, so it happens atomically. In the library we propose two flavors of this fee forwarder contract. Of course, anyone can write their own fee forwarder contract, but we are proposing two versions of it: permissionless and permissioned. The permissionless one is a trustless contract, meaning that once it is deployed there is no owner that can change any settings. Anyone can be a relayer, meaning
+
+[20:00] this can create some interesting dynamics, some secondary market for relayers. The permissioned fee forwarder is, on the contrary, an owned contract, meaning there is a clear owner that can specify the tokens users are allowed to pay transaction fees with, and who can act as a relayer. I can see it working very well for applications, for dApps, that want more control over who can serve as a relayer and what tokens they are
+
+[21:00] willing to take as payment. So what are the key takeaways? For classic accounts, this model is just another option, so maybe we can call it a better user experience. But for smart accounts it's a very important addition, because it defines how the relayer and the smart account should interact with each other in order to make a transaction happen. And, something we'll see later this year, this model will integrate with our relayer service. So that's the fee abstraction module. If there are any questions, I can take them.
+
+[22:00] >> It doesn't seem like there are any questions at this point. >> Okay, then I will move on to the next module, which is the first module from the governance package. What is a timelock? It is a smart contract that enforces time delays on transaction execution, with the goal of allowing a safe exit if there is disagreement with a certain governance decision. How does this work? There are different use cases for a timelock, but the most obvious is a very simple owner-based setup, meaning we have a contract that many users interact with, and there is an owner that can change certain settings on this contract.
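+
+As a side note on the permissioned variant described a moment ago, its configuration boils down to something like the following sketch. The field names are assumptions made for illustration, not the actual contract's storage layout:
+
+```rust
+// What a permissioned fee forwarder's owner controls, per the talk:
+// which tokens are accepted for fees, and who may act as a relayer.
+struct PermissionedConfig {
+    owner: String,                   // account allowed to change these settings
+    allowed_fee_tokens: Vec<String>, // token contracts accepted as fee payment
+    allowed_relayers: Vec<String>,   // accounts permitted to act as relayers
+}
+
+fn main() {
+    let cfg = PermissionedConfig {
+        owner: "G...OWNER".into(),
+        allowed_fee_tokens: vec!["USDC-contract-id".into()],
+        allowed_relayers: vec!["G...RELAYER".into()],
+    };
+    println!("{} fee token(s) allowed", cfg.allowed_fee_tokens.len());
+}
+```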
+
+[23:00] Usually this owner is some account, and if the owner wants to make a change on the contract, they just call the functions that are permissioned to be callable only by this owner, and the change happens immediately. The users don't have time to react to it, so they trust the good faith of the owner. But this bears many risks, and not only around the good faith of the owner: what if the keys of the owner get compromised and some hacker takes control over the contract? To minimize those risks, we are introducing the timelock, which
+
+[24:00] is a contract, and we designate this timelock contract as the owner of our initial contract. For this to work, we also need another layer of accounts serving different roles: here we have a proposer and an executor, and we'll see how this works. So, to recap: in the classical setup, on the left-hand side, the account is directly the owner, and the intention to make a change and the execution of that intention through a transaction are tied very closely together, so they happen immediately. And
+
+[25:00] with the timelock, there is the intention to make a change, which is expressed by the proposer. This happens at some moment; then some time elapses, some period; and then we execute the intention. So the intention and the execution get separated in time. The unit of this whole flow is an operation, which is simply a target (the address of the contract we want to act upon), a function, the arguments for this function, and a predecessor, meaning that we can chain operations one to another and impose a sequence
+
+[26:00] on the execution: a newer operation cannot be executed before we execute its predecessor, the previous one. What is the life cycle of this operation? First the proposer schedules the operation, meaning they invoke a schedule function on the timelock contract. Then there is some delay that we wait to pass. Once this time passes, the operation is marked as ready, and the executor can call an execute function on the timelock, which will produce the desired effect on the target contract, and
+
+[27:00] we are done; the operation is done. What are the benefits? First, as we mentioned, the most obvious benefit is that it allows investors to exit in time if necessary. It also forces the admins of the contracts to be more transparent with the users, meaning they need to communicate in advance about what change they mean to make on the target contract, so it promotes transparency.
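+
+The operation lifecycle described above reduces to a small readiness check. Here is a plain-Rust sketch with illustrative names, not the actual OpenZeppelin implementation:
+
+```rust
+// An operation becomes executable only after its delay has elapsed and its
+// predecessor (if any) has already been executed.
+struct Operation {
+    ready_at: u64,          // ledger/time after which execution is allowed
+    predecessor_done: bool, // sequencing: the chained previous operation ran
+}
+
+fn can_execute(op: &Operation, now: u64) -> bool {
+    op.predecessor_done && now >= op.ready_at
+}
+
+fn main() {
+    let op = Operation { ready_at: 1_000, predecessor_done: true };
+    assert!(!can_execute(&op, 999));  // still inside the delay window
+    assert!(can_execute(&op, 1_000)); // marked ready: executor may proceed
+}
+```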
+Also, we have seen in the past how this separation between the intention and the execution provides some time to react to errors: because it is transparent, everyone from the
+
+[28:00] community can spot errors and prevent them from happening. And, as we mentioned, this also prevents a malicious takeover of the owner's accounts. In the end, this whole system provides more guarantees to the community. As I mentioned, this is the first module from the governance package. In the next milestone, which is in Q1, we are going to ship the fungible and non-fungible vault extensions, which will allow attributing voting power to tokens, and the governor, with all the features around quorum, counting
+
+[29:00] votes, and so on. So that's it for the timelock. >> Fascinating. It doesn't seem like we have any questions. >> Yeah, if there are any questions, we are also present in Discord, so feel free to ping us there if you want to learn more. There is also the documentation at docs.openzeppelin.com, where these modules and how they function are explained in more detail. So, looking forward to seeing people use these modules, tweak them, and make them useful for their use cases.
+
+[30:00] >> Great. Well, thank you so much for presenting these great new additions and these new concepts. I think the governance work is going to be fascinating to watch develop this year; there's a lot of very interesting things lined up here. And thank you both for joining. Last year was fascinating, because especially towards the last half of the year we really saw hackathons and builders around the world start to include and use the OpenZeppelin libraries and really implement them, and it's been great to see how much value this is bringing to the community, and how excited the community is about all the great work you're doing. So thank you so much for joining. >> Thank you for hosting us once again. Looking forward to the next time.
+
+[31:00] >> Yeah, we'll definitely do another one when we have more updates. So thank you for joining, everyone. >> See you later. Bye.
+ +## CAP-77 Ledger Key Freezing, CAP-78 TTL Policies, and CAP-79 StrKey/Address Conversions {#part-2} + + + +We had three CAPs prepared for this meeting. CAP-77 introduces a way to make ledger keys inaccessible via a network configuration upgrade voted on by validators. CAP-78 proposes an interface that lets developers specify TTL extension policies (for example, "if an entry TTL is less than 29 days, extend it to a 30-day TTL"). CAP-79 adds host functions for converting Stellar StrKey strings to and from Address/MuxedAddress objects. + +### Key Topics + +- CAP-77: Proposes a validator-voted network upgrade mechanism to make specific ledger keys inaccessible (an emergency tool motivated by past corruption and incident response needs) +- CAP-77 (edge cases): Discusses tricky edge cases for classic entries and DEX behavior, with a goal of being “surgical” (minimizing broader network disruption) +- CAP-78: Proposes a contract interface for TTL extension policies (e.g., “if TTL < N days, extend to N days”) to standardize and simplify rent/TTL management +- CAP-79: Adds host functions to convert StrKey strings to/from `Address` and `MuxedAddress` (including support for muxed/max-address-style identifiers) +- Closing note: Reminder/discussion of SLP-4 to raise network resource limits and lower non-refundable fees to enable more on-chain work at lower cost + +### Resources + +- [CAP-77: Ability to freeze ledger keys via network configuration](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0077.md) · [Discussion](https://github.com/orgs/stellar/discussions/1811) +- [CAP-78: Host functions for performing limited TTL extensions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0078.md) · [Discussion](https://github.com/orgs/stellar/discussions/1825) +- [CAP-79: Host functions for muxed address strkey conversions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0079.md) · [Discussion](https://github.com/orgs/stellar/discussions/1840) + +
+ <summary>Video Transcript</summary>
+
+[00:00] Another case is just a better extension strategy for general-purpose contracts, so that you can spread the extension fees among users more evenly by limiting the maximum extension. The way we achieve this is described in the CAP. We provide a new interface for the TTL extension function, or rather two functions: one for contract data, and another for contract instance and code. And instead of the old threshold parameter that defined the minimum extension, we define a range of min extension and max extension. That says the extension will only be performed if it is at least the min, and it will then be clamped by the max extension. So you can set up more
+
+[01:00] complex TTL management strategies. I don't think there is a need to go into the math more deeply; there is some edge-case handling, but the gist of it is really that, besides limiting the minimum extension necessary to actually extend the TTL, you can now set the max extension as well. Please take a look at the CAP for more details, and if there are any questions regarding this CAP right now, please feel free to ask. Okay, so the question is, for example: the current TTL is 10, extend to 20, min is 5, max is 8; how would extend work?
+
+[02:00] Okay, so it is important to distinguish between TTL and the live-until ledger. TTL is basically how long the entry will live from this moment. For example, we are on ledger n and the live-until ledger is n plus 1,000; that means the TTL is a thousand ledgers, because the entry will live for a thousand ledgers from now. So let's say your current TTL is 1,000 ledgers, and then you can say: I want to extend the TTL to be 10,000 ledgers,
+
+[03:00] but only allow extending by, let's say, 500 ledgers at a time. So if you're at a thousand ledgers now and your target is 10,000, every user that calls it will extend the entry by just 500 ledgers, and that limits the fees for every individual caller. On the other hand, in the same scenario, say you want your TTL to be 10,000 but it's already 9,999. Then the extension would be just one additional ledger, and if your min extension is, for example, 100 ledgers, it won't pass the check and the TTL extension will not happen at all. Which is basically similar to what the threshold does; the math is a bit different, but the end result is exactly the same, I believe.
+
+[04:00] I think this is a topic that is easier to digest on a piece of paper than by trying to explain it verbally, but the CAP has all the equations, so you can plug in some numbers and see what comes out. The other consideration I wanted to add is that, for temporary entries, the max extension will work in exactly the same way.
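+
+The rule just walked through fits in a few lines. A sketch follows, with illustrative function and parameter names; the CAP itself has the exact equations and edge cases:
+
+```rust
+// CAP-78-style rule: extend only if the gap to the target TTL is at least
+// `min_ext`, and clamp each individual extension by `max_ext`.
+fn extension_to_apply(current_ttl: u32, target_ttl: u32, min_ext: u32, max_ext: u32) -> Option<u32> {
+    let gap = target_ttl.saturating_sub(current_ttl);
+    if gap < min_ext {
+        return None; // below the minimum: no extension happens at all
+    }
+    Some(gap.min(max_ext)) // clamp the per-call extension
+}
+
+fn main() {
+    // The worked examples from the discussion above:
+    assert_eq!(extension_to_apply(1_000, 10_000, 100, 500), Some(500)); // each caller adds 500 ledgers
+    assert_eq!(extension_to_apply(9_999, 10_000, 100, 500), None); // gap of 1 is under the min of 100
+    println!("ok");
+}
+```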
+That may or may not be a bit of a footgun, because if you miss the extension of a temporary entry, it will be gone forever, and that may not be desirable for a lot of use cases of temporary entries. So,
+
+[05:00] generally speaking, this min extension parameter, the threshold parameter in the existing functions, is really more for fee management of persistent entries. For the temporary entries you generally want the extension to always happen, and to happen in a precise fashion, or else you're risking just losing your temporary entry because it hasn't happened. The protocol allows that, but probably at the SDK level we'll try to make sure the default is reasonable, so it is hard to end up with something weird because of a misconfigured min extension. Okay, I'll wait a few more moments in case anyone wants to type something in.
+
+[06:00] Well, I apologize for that, but I'm not sure I can do anything about it; it may be Discord acting up on people. Right, if you have any further questions or suggestions regarding this CAP, the discussion has been linked above, so please feel free to comment asynchronously. Let's proceed to CAP-79; let me link it.
+
+[07:00] This one is a simple CAP as well. It just adds host functions for the muxed address/strkey conversions, and it basically closes a bit of a feature gap, an oversight, from Protocol 23. In Protocol 23 we introduced a new type of address, the muxed address, which allows users to specify an additional 64-bit id
+
+[08:00] together with their public key. This memo-like id can then be consumed by exchanges to distinguish between multiple users that have the same address on chain, so you can have a federated account that has multiple users but only a single ledger entry holding the balance on chain. Okay, let me try to actually disable noise suppression; I know it did introduce some artifacts before, and I hope it's better now. So, anyway, coming back to the CAP: muxed addresses were added in Protocol 23 and you can pass them in as contract arguments, but what has been missed is the strkey format conversions that exist for regular addresses. As for the use cases for strkeys:
+
+[09:00] mainly bridge protocols, protocols that generally do anything cross-chain. Strkey is the common format to use for messaging; if you need to specify a destination from the source chain, strkey is the preferred format, and not being able to handle strkeys for muxed addresses limits those protocols. The CAP is very straightforward; it just does the conversions.
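+
+For readers unfamiliar with the two strkey flavors involved, here is a conceptual sketch. The types are illustrative only; see CAP-79 for the actual proposed host functions:
+
+```rust
+// A classic "G..." strkey encodes just an ed25519 public key; a muxed "M..."
+// strkey encodes the same key plus a 64-bit multiplexing id. Many users can
+// thus share one on-chain account (a single ledger entry holds the balance)
+// while off-chain services tell them apart by the id.
+struct AccountKey([u8; 32]); // the public key behind a G... address
+
+struct MuxedAccount {
+    key: AccountKey, // shared on-chain account
+    id: u64,         // per-user id carried only by the M... form
+}
+```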
+The only thing about it is that we have already implemented this functionality in the SDK, so there is potentially an argument of: well, why do we even have it in the host? And I think in
+
+[10:00] this case it is mostly for the sake of the full feature set. We already have the strkey conversions for regular addresses, which it would have been possible to get away without host functions for. And of course, doing things on the host side is just cheaper and, hopefully, more future-proof, because the implementation can be fixed without updating any contracts in case there are some issues, which we hope won't be the case. So yeah, that's the CAP, and in this case it is just cheap enough to implement, so that we have consistent host function coverage for all the address kinds instead of the weird feature gaps we have right now.
+
+[11:00] >> Exactly, and the consistent feature set is the main driver here, I think. >> Okay, there is a question about the naming; maybe I should answer from the CAP specification. The CAP's function returns either an address object, for the regular keys, or, well, a muxed address object. So I guess it's a semantics question: what do we even call an address? Because we have these two different object types, the address object and the muxed address object, and the function can return either one
+
+[12:00] of them. So it's kind of a philosophical question whether it should be called to_muxed_address or just to_address. I'll give it a thought, but it doesn't seem very important in the end. My intention here is that the V2 function would be used going forward for all the use cases, so that you can just safely stop using V1 in the SDK, and there won't be any semantic weirdness. All right, we'll wait a few more moments while people type.
+
+[13:00] That's a good question. Yeah, I think if to_address keeps the old behavior, then you can name the new one more specifically for muxed addresses, because you could use the new function everywhere, but then you would need to distinguish between the different object types and fail, and that's unnecessary work. I agree that the old function is still useful for the case of normal address conversions. And you actually do care whether it is an M or a G address, because muxed addresses are somewhat gated: we try to limit the scope of where you can use them. For example, you cannot require an M address in every context; you need to convert it to a normal address first.
+
+[14:00] And not every contract can deal with an M address correctly, which is why it is completely its own thing. Basically, a muxed address is a superset of an address: if you can deal with a muxed address, you can deal with an address, but not vice versa. Yeah, I think that's a good suggestion.
+Yeah, after reconsidering this, I agree that naming these specifically for muxed addresses sounds more in line with how we use them. Thank you for the suggestion; I will update the CAP accordingly.
+
+[15:00] Right, as usual, any questions or comments can be left on the discussion thread. And let us move on to CAP-77. This CAP is much less straightforward than the previous two. To give a bit of background on why it came up: as some or all of you may know, we had an incident in Protocol 23 caused by a data corruption bug, and the initial response when the incident was discovered
+
+[16:00] was to make it so the corrupted data cannot be accessed, to prevent further corruption before we understood it better. That was especially relevant for the entries that had been corrupted but not yet restored; for those entries, this definitely prevented further breakage and corruption. The way it was achieved was via an emergency release of stellar-core that basically just hardcoded the affected contract data keys and rejected any transaction that accessed any of these bad keys in its footprint.
+
+[17:00] Yes, and this was achieved without any protocol changes. To provide some background on what validators can do today without any protocol changes: they can simply exclude transactions from the mempool, and they can do this for whatever arbitrary reasons, malicious or not. But it's an important thing to understand when thinking about this CAP, because the CAP does something uniquely different from what validators can do today. So, for this corruption issue, the custom core build would reject the transactions that try to access the bad keys at the mempool level,
+
+[18:00] and corruption could only be prevented from the moment when every tier-1 validator had updated to this new core build, such that there was no way a transaction accessing corrupted data would end up in a transaction set. That is a very fragile and time-consuming procedure: first, a new release has to be pushed, and second, you need something like 100% consensus on it, so to speak, consensus at the level of the build. And it does not provide any guarantees, because someone can roll back the build for whatever intentional or unintentional reason,
+
+[19:00] which motivated this CAP: it would allow doing something like this at the protocol level. The benefit of doing it at the protocol level is that an explicit consensus of validators is required, which is good both from the transparency standpoint, because it is possible to observe what the validators have voted for, and from the emergency response standpoint, because we need a lower percentage than 100% to agree on applying a network upgrade.
+So even if not every validator, but, say, five out of seven validators per the current network configuration, vote for the upgrade that would freeze the ledger keys, then the upgrade will go
+
+[20:00] through. It is also not easily reversible without votes, so there is no concern of "what if someone changes their build" and whatnot. Obviously, the corruption scenario is something we'd like to avoid in the future, and I guess one of the arguments against this CAP is: well, we probably don't want to have any similar data corruption issues in the future. But of course it is never possible to say that the system is 100% formally valid and will never ever have any bugs. So there is a bit of a trade-off between making the protocol more complicated versus, if data gets corrupted again for whatever
+
+[21:00] reason, freezing the keys being the right remediation for it. Another potential motivation for this would be a known hack or vulnerability that affects some contracts or users; there is a theoretical possibility of freezing the balances. But I think this case is much less clear-cut, because for high-profile hacks it might be really tricky to chase the right entries. For some exact cases of bugs, where they are likely not being actively exploited and the impact is more or less limited to some set of entries, it's still a
+
+[22:00] consideration. Okay, let me read the question from chat. Yes. Now, in terms of how the CAP actually works, it covers, on a high level, two kinds of entries: Soroban entries and non-Soroban entries. For the non-Soroban entries we are only considering account and trustline entries, because if we bother implementing this logic for classic entries, these two seem like the most obvious candidates. Everything else on classic does not usually hold value, and even if corruption happens there, the consequences are probably pretty
+
+[23:00] tame; it's more of a complexity trade-off. Soroban-only entries, of course, can only be accessed from Soroban, which makes the freeze process much easier. And now to answer the question: yes, if the network votes to freeze a contract code key, for example, then any transaction that accesses this contract will be rejected. It will not even fail; it will not even be included in a ledger. So if, say, a contract gets corrupted for whatever reason and somehow skips an authorization check, freezing the contract is actually a good remediation, because users will not be able to interact with the vulnerable contract.
+
+[24:00] Yeah, so basically: what are the use cases that this CAP fits?
+But the idea is that any interaction with the entry, either read-only or write, is prohibited, with one small exception we made for the sake of simplicity. The entry cannot be accessed at all: it's not only that you cannot, for example, modify a balance; you cannot even read it. If you try to read a frozen balance, you will fail as well, because if the balance is corrupted you may arrive at bad results, which happened,
+
+[25:00] for example, with the Protocol 23 corruption bug, where a liquidity pool got incorrect balances. I actually don't recall whether the data inside the pool was corrupted further, but there is definitely a risk of this happening: if you have any math based on just reading a corrupted entry, this math may go wrong, and then you may end up in a bad state in an unrelated contract. So let me continue with the CAP; that may answer some more questions. As I mentioned, the intention is to not include the
+
+[26:00] transactions into the ledger at all. When we get into dealing with account and trustline entries, that's unfortunately not always possible, because there are no Soroban-style footprints for classic operations, and it is not always possible to know whether a frozen entry is even being accessed. That is why a fraction of transactions would fail at apply time, and there is also some separate logic for handling the DEX, described in the CAP, that I will talk about in more detail later. But the intent is really just to prevent the interactions with the entries, and unfortunately the way of doing this is kind of complicated, because if we didn't include classic entries, it would be as
+
+[27:00] simple as filtering based on the footprint. So let me talk in a bit more detail about the semantics. First, the upgrade process itself: it needs to be customized a little compared to the normal network upgrades, because the maximum size of a network upgrade is limited by the contract data entry size, and that may be a limiting factor if we need to freeze many keys. This is why the CAP proposes a structure where a delta of changes to the frozen entries is voted for, so that the total number of frozen keys can exceed the contract data entry size and can be updated incrementally.
+
+[28:00] That's a bit of an extension of the current upgrade process, which is otherwise rather straightforward, but again, it is a bit of additional complexity in this CAP. And now to the fun part: how this would actually be implemented at the protocol level. As I mentioned, for Soroban transactions things are very simple: if we see a frozen key in the footprint, we reject the transaction, and that's it. We don't need to do anything else.
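+
+That Soroban-side check is simple enough to sketch directly. The types are illustrative; the real check would operate on the actual ledger key types inside core:
+
+```rust
+use std::collections::HashSet;
+
+// Stand-in for a ledger key; core would use the real LedgerKey XDR type.
+type LedgerKey = [u8; 32];
+
+// Reject a Soroban transaction outright if any key it declares in its
+// footprint is in the network-voted frozen set.
+fn should_reject(footprint: &[LedgerKey], frozen: &HashSet<LedgerKey>) -> bool {
+    footprint.iter().any(|key| frozen.contains(key))
+}
+
+fn main() {
+    let frozen: HashSet<LedgerKey> = [[1u8; 32]].into_iter().collect();
+    assert!(should_reject(&[[1u8; 32]], &frozen)); // touches a frozen key
+    assert!(!should_reject(&[[2u8; 32]], &frozen)); // untouched: passes
+}
+```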
+We can also reject the transaction if a frozen account is the source account of the transaction or of an operation; that's simple as well. Then the CAP lists the operations where
+
+[29:00] it is easy to say what the source or destination account or trustline is just by looking at the operation, which is a good chunk of the operations. So again, if we can tell that a frozen account or trustline is accessed, we reject the transaction and everything is good. Now to the non-trivial operations. First, the claimable balance operations and the liquidity pool deposits and withdrawals use opaque identifiers, and without extra logic it is not possible to tell what the destination is, unless we do some additional kind of indexing, which I'm not sure we want to do. So the suggestion here is to just fail at apply time if a frozen key
+
+[30:00] is being accessed, after we have actually determined the trustlines participating in the operation. And I believe for claimable balances it's even trickier, because a balance can have multiple... please never mind, I said something incorrect. Anyway, this is a bit ugly, but still straightforward. What is ugly and kind of complicated is the DEX operations. If we want to support freezing trustlines and accounts, then we need to care about the classic DEX, and of course it is not possible to tell beforehand whether a frozen balance is
+
+[31:00] affected by a DEX operation. It will only be known at apply time, and then at apply time we could also just fail if we encounter an offer that would result in modifying a frozen balance. But that would effectively freeze a whole asset pair for trading, which is a bit of an unexpected impact for the change. We want to be pretty surgical about this: we want to prevent operations that depend on corrupted entries, but we don't really want to disrupt network activity more than necessary beyond that, and freezing a whole trading pair in the DEX may be problematic. Of course, the chances of that happening are not very high, but we still want to prevent it in the protocol. So the
+
+[32:00] proposal in the CAP, instead of freezing the pair, is to emulate crossing such an offer without modifying any balances, and to just remove it. Removing an offer is not great, but it kind of gets around the issue of freezing the whole trading pair. Of course, we could add some more complex logic to just skip the offer, but we would need to skip it every time we encounter it, and that would require some pretty invasive changes to the classic DEX for a very limited use case that we don't even know will ever happen.
+So the proposed solution is basically a trade-off between complexity and the usability of not freezing the whole
+
+[33:00] trading pair. I kind of agree it's not pretty, but it's the best we could come up with that seems relatively non-intrusive. And the last but not least exception to all this mess: well, if an account is frozen, what happens if someone removes an entry sponsored by this account? Again, it would be kind of weird to fail everyone that has a sponsorship dependency on a frozen account, so the suggestion is to allow modification of the sponsorship field. That is a bit of a leak of the freeze semantics, but it seems acceptable, as we're not really touching the balance or anything like that. It's just the sponsorship field,
+
+[34:00] and it's very unlikely to be corrupted, and even if it is corrupted, it should be relatively easy to fix if necessary. So again, it is a trade-off: tracing the sponsorships to frozen entries is much more complex and would probably need to happen at runtime for every operation, which seems like a lot of overhead. So this is basically a small compromise between complexity and limiting the impact to only what is hopefully relevant.
+
+[35:00] I see a question from John: what other challenges exist in having contracts call the DEX? Contracts cannot call the classic DEX at the moment, and I'm not sure they will ever be able to, precisely because of the reasons behind all these tricky cases: it is not possible to know beforehand what data will be modified by any transaction involving the DEX. That goes against the Soroban data model, where we define everything that is going to be accessed in the footprint. So yeah, contracts cannot call the DEX, but if you wanted to freeze accounts and trustlines, you would need to deal with the DEX somehow. So that's more or less it for the semantics. Again, there's a lot of complexity in handling the classic entries, and an argument could be made for
+
+[36:00] maybe limiting this CAP to only Soroban entries, because those only appear in footprints and the check is very simple; that seems like a much weaker but much simpler change than what is described here. But of course the impact is much more limited, and if an account gets corrupted for whatever reason, then we won't be able to do anything about it. Yeah, that's pretty much it for the semantics. And I've touched on this a little before, but I want to emphasize that, while this CAP may look controversial, validators already have censorship powers, and what this really proposes is basically an emergency mechanism that is also
+
+[37:00] transparent.
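+
+To pin down the DEX handling described a moment ago, here is the apply-time rule as a sketch. The types are illustrative; the CAP defines the exact behavior:
+
+```rust
+// When order-book matching reaches an offer whose owner's account or
+// trustline is frozen, the CAP proposes emulating the cross, without
+// touching any balances, and deleting the offer, rather than failing
+// and effectively freezing the whole trading pair.
+enum CrossOutcome {
+    Filled,             // normal path: balances move, offer is consumed
+    EmulatedAndRemoved, // frozen path: no balance changes, offer deleted
+}
+
+fn cross_offer(offer_owner_frozen: bool) -> CrossOutcome {
+    if offer_owner_frozen {
+        CrossOutcome::EmulatedAndRemoved
+    } else {
+        CrossOutcome::Filled
+    }
+}
+```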
+So it seems quite unlikely that it can be abused meaningfully. And if the validators conspire to do something bad anyway, then we probably won't learn about it, unlike with this protocol mechanism, which is at least observable. But I think the bigger downside to this CAP is really all the technical complexity it comes with, and the fact that we don't know whether it is ever going to be used. It is our response to potential events like the
+
+[38:00] archival corruption bug happening again, but of course we don't know the probability of that happening. So yeah, that's the main thinking behind this CAP. To summarize the implications: it makes an entry impossible to access, and depending on the number of contexts the entry can appear in, a number of operations will not be possible. So any questions or thoughts on why this is a good idea or a bad idea, or whether we should do something else, are welcome.
+
+[40:00] Right, well, I will then wrap up here, and again, the same goes for the other CAPs: please feel free to comment on the discussion threads if you have any thoughts, questions, or concerns. I think that's pretty much it for today's presentation. I guess, while everyone is here, I just want to remind everyone about SLP-4, which is a proposal to basically increase the limits for pretty much every resource by about two times, and also decrease the non-refundable fees by about four times, which basically means more stuff to do on chain, for cheaper.
+
+[41:00] Please feel free to chime in on the discussion if you have any thoughts on this as well. And yeah, thanks everyone, and see you at the next protocol meeting.
+
+</details>
diff --git a/meetings/2026-01-29.mdx b/meetings/2026-01-29.mdx new file mode 100644 index 0000000000..64faa5c44e --- /dev/null +++ b/meetings/2026-01-29.mdx @@ -0,0 +1,83 @@ +--- +title: "BN254 Performance Upgrades and Closing the UX Gap Between Soroban and Native" +description: "We have one new CAP to discuss and that is CAP-80. This proposal adds BN254 Multi-Scalar Multiplication and modular arithmetic used in a variety of ZK proof applications." +authors: + - carsten-jacobsen + - dmytro-kozhevin + - jay-geng + - matias-wald + - siddharth-suresh +tags: [developer, CAP-73, CAP-80] +--- + +import YouTube from "@site/src/components/YouTube"; + + + +This protocol meeting covers CAP-80, which extends Stellar’s BN254 host support with multi-scalar multiplication and modular arithmetic to significantly speed up ZK-proof workloads. The session also includes a follow-up on CAP-73, focused on improving Soroban interoperability by making it easier to create new accounts and handle trustlines intentionally. + +### Key Points + +CAP-80: adds BN254 G1 multi-scalar multiplication (MSM), BN254 modular arithmetic host functions, and on-curve checks to improve ZK performance Rationale: reduces expensive guest-side math and avoids repeated point conversion overhead from many add/mul calls Expected impact: major performance gains for contracts using BN254-heavy proving workflows CAP-73 follow-up: enables creating new G-accounts when transferring XLM from Soroban (still requires covering the base reserve) CAP-73 trustlines: proposes an explicit trust(...) function to create missing trustlines when authorized, rather than doing it implicitly during transfers Resources: CAP-80 spec/discussion and CAP-73 spec/discussion links for deeper review + +### Resources + +- [CAP-73 Allow SAC to create G-account balances](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0073.md) · [Discussion](https://github.com/orgs/stellar/discussions/1668) +- [CAP-80 Host functions for efficient ZK BN254 use cases](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0080.md) · [Discussion](https://github.com/orgs/stellar/discussions/1826) + +
+ <summary>Video Transcript</summary>
+
+[00:00] All right, so we can get started then. Today I'll be speaking about CAP-80, and after that Dima will follow up with some updates to CAP-73. I just pasted the CAP in the chat. CAP-80 adds some host functions to enable more ZK use cases on Stellar. In Protocol 25 we added G1 add, G1 multiply, and pairing host functions, which gave Stellar BN254 feature parity with Ethereum. But this CAP proposes some additional host functions to allow for better performance. The functions are: some BN254 modular arithmetic host functions, BN254 G1 multi-scalar multiplication, and is-on-curve checks for BLS G1, BLS G2, and BN254 G1.
+
+[01:00] The arithmetic host functions are expensive to implement on the guest side, which is why we're proposing adding them to the host, and the same applies to the on-curve checks. We're also adding G1 multi-scalar multiplication because the cost of repeatedly converting points between the external and internal representations can be expensive, and the repeated add and multiply host function calls do those conversions back and forth on each call. The multi-scalar multiplication host function allows a contract to only do this conversion once, at the beginning and the end. We already have cost types for most of these host functions, so the only one we need to add is for BN254 G1 MSM. I don't think there's anything that controversial about this change; we're taking feedback from the ecosystem on what they're
+
+[02:00] asking for. But I wanted to open this up to any questions, if anyone has opinions on any other host functions we should add. Are there any questions? If anyone does have opinions on what else we should add or look into, specifically for the ZK host functions, you can post in the discussion I linked in the chat. So, if there's nothing else, no other questions or input: Jay, do you have anything to add? >> No, I think it's fairly
+
+[03:00] straightforward, and these functions already exist for BLS as well, so it seems useful to have them. The reason they weren't added in the first place is likely just that we were trying to get to parity in the first iteration and gather feedback, so these are the second round, which makes sense to add. >> Yeah, makes sense. I see Matias is typing something; I can follow up offline as well, and send the discussion thread to the people you think should be here that aren't. But other than that: Dima, if you're ready to present CAP-73, go ahead.
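+
+Before moving on to CAP-73, here is a reference sketch of what the MSM host function described above computes. A toy modular group stands in for BN254 G1; real MSM operates on elliptic-curve points, and the optimized algorithms look nothing like this naive loop:
+
+```rust
+const P: u64 = 1_000_000_007; // toy prime modulus standing in for the group
+
+// "Scalar times point" in the toy group: just modular multiplication here.
+fn scalar_mul(point: u64, scalar: u64) -> u64 {
+    ((point as u128 * scalar as u128) % P as u128) as u64
+}
+
+// MSM computes sum_i scalars[i] * points[i] in one call, so the costly
+// external/internal point conversions happen once instead of per operation.
+fn msm(points: &[u64], scalars: &[u64]) -> u64 {
+    points
+        .iter()
+        .zip(scalars)
+        .fold(0u64, |acc, (p, s)| (acc + scalar_mul(*p, *s)) % P)
+}
+
+fn main() {
+    assert_eq!(msm(&[2, 3], &[5, 7]), 2 * 5 + 3 * 7); // 31, well under P
+    println!("ok");
+}
+```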
+
+[04:00] >> Yeah, go ahead, Dima. >> All right, thank you. I guess one thing worth mentioning for CAP-80 is that it's actually about a three-times improvement in performance for the contracts that try to use these functions, with some interesting applications, so it seems like a very cool thing to have. All right, let me link CAP-73 and the discussion for CAP-73. I'm pretty sure I have presented it before, but that was a while ago, so I will quickly go over the summary of the CAP and then move on to the proposed changes. I just realized that the CAP does
+
+[05:00] not have the most recent PR merged, so let me also link the pull request that details the update I'm going to make. CAP-73 is a CAP that basically closes one of the gaps between Soroban and classic operations, this gap being the ability to create new balances for G accounts from the SAC. As you may or may not know, currently, if an account does not exist, you cannot transfer XLM to it, and if a trustline does not exist for a classic asset, you cannot use the SAC to perform a transfer to the respective G account. This is a little bit annoying,
+
+[06:00] because in order to fix this behavior you need to actually go out of the Soroban ecosystem and back to the classic operations, which is just generally quite a suboptimal developer experience. So what does the CAP do? Oh, I see the CAP has just been merged. What the CAP does in the current iteration is just two small changes to what the SAC does. The first change is when we are dealing with the XLM SAC specifically: transferring XLM to a G account that does not exist yet will result in the
+
+[07:00] creation of that G account with the respective XLM balance. This part of the CAP has always been there and never changed. It basically allows creating new accounts, new G accounts, from Soroban, and in general just closes the gap in behavior: now, for the XLM SAC, you can just transfer to any address, and if it is a G address, an account entry will simply be created, which is a big win for the developer experience. The only caveat to be aware of is that the transfer has to cover the base reserve for a new account, which is one XLM; so if the transfer is below one XLM, it will still fail, as it does today. But there is really nothing you can do about that; it's just the semantics of the accounts.
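+
+A sketch of the XLM behavior just described, as an illustrative check in stroops (1 XLM = 10,000,000 stroops; the one-XLM reserve figure is as quoted in the talk):
+
+```rust
+// CAP-73: transferring native XLM via the SAC to a non-existent G... account
+// creates the account, provided the amount covers the quoted reserve.
+// Below that, the transfer still fails as it does today.
+const ONE_XLM_STROOPS: i128 = 10_000_000;
+
+fn transfer_creates_account(amount_stroops: i128) -> bool {
+    amount_stroops >= ONE_XLM_STROOPS
+}
+
+fn main() {
+    assert!(transfer_creates_account(ONE_XLM_STROOPS)); // exactly 1 XLM: created
+    assert!(!transfer_creates_account(9_999_999)); // under the reserve: fails
+}
+```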
+What has been changed is the trustline support. Before, the CAP had a couple of functions to deal with trustlines; since then I have simplified it a lot, down to a single function
+
+[08:00] called trust, which takes in an address and maybe creates a trustline. If you're dealing with a C address, this will always be a no-op, because there are no trustlines for C addresses. For a G address, if there is an existing trustline, it is again a no-op, and only when an address has no trustline will it require authorization from that address; if the proper authorization is provided, the trustline will be created. The reason we are not going for the automated approach, as we do with XLM, is precisely the authorization part, because it is a somewhat non-trivial observation that a transfer may sometimes,
+
+[09:00] when transferring to a non-existent trustline, require authorization from the transfer recipient in order to create the trustline. That would result in a very non-trivial developer experience, because suddenly your transfers may sometimes require additional authorization from the recipient, and this behavior is really an edge case that is hard to notice. So instead, the CAP opts for an explicit approach, and the idea behind that is: if you actually do want to create trustlines, you'll write some code on the contract side to do so, and then you can implement your client logic accordingly, to request the authorization from the recipient. And again, because of this authorization requirement, I would not expect this to be used in each and every protocol,
+
+[10:00] because it is non-trivial, and it may not be desirable to actually request anything from the receiver, so it might be easier to fail. But there are some specific protocols where it is expected for new users to be onboarded to the asset, so to speak, and for those protocols this would be a useful addition, to do things within the protocol. I guess there is still a point of debate versus the automated solution, where a transfer to a non-existent trustline would always require authorization; I'm not super settled on the current way, so I would be interested in hearing any feedback. And another important thing to mention is that this new trust function doesn't allow developers to specify the limit of the new trustline. The reason for that is really that
+
+[11:00] trustline limits are a rather niche feature: only about 2% of trustlines have limits that are not effectively unlimited, and the percentage may be even lower, because that counts limits of around 10 to the power of 18 as unlimited; you could maybe even reduce the threshold. So it is basically advanced functionality, and for what the CAP tries to do it does not seem useful in any way: it doesn't seem like contract developers can make an educated guess about what the new trustline limit should be, and a very safe default is an unlimited trustline, which users that do care about limits can then manage. But I would expect such users to set up their specialized trustlines in advance anyway, rather than be onboarded by some bridge protocol or something.
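+
+The branching just described is compact enough to sketch. This is pseudocode against assumed helper names; is_contract, trustline_exists, require_auth, and create_unlimited_trustline are illustrative stubs, not the CAP-73 interface:
+
+```rust
+// Stubs standing in for ledger state lookups (assumed, for illustration).
+fn is_contract(_addr: &str) -> bool { false }
+fn trustline_exists(_addr: &str) -> bool { false }
+fn require_auth(_addr: &str) { /* the would-be holder must sign off */ }
+fn create_unlimited_trustline(addr: &str) { println!("trustline created for {addr}"); }
+
+// Proposed `trust` semantics per the discussion above.
+fn trust(addr: &str) {
+    if is_contract(addr) { return; }      // C... address: always a no-op
+    if trustline_exists(addr) { return; } // already set up: no-op
+    require_auth(addr);                   // explicit opt-in from the holder
+    create_unlimited_trustline(addr);     // limit defaults to unlimited
+}
+
+fn main() {
+    trust("G...EXAMPLE");
+}
+```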
actually set up the specialized trust, lines in advance, not being on ramp like some bridge protocol or something. So + +[12:00] Some bridge protocol or something. So just trying to keep protocol simple here, and make it straightforward enough for developer to use this, yeah, of the function. Change this will profile. Well it is technically low trust does the same. thing that change trust operation, but with a very specific set of parameters, I think, the fact that it shares semantics, for special use case of change trust, operation should not impact our naming decisions. But of course I'm open to suggestions for a different name, But the change trust comparison is really just to define the semantics u based + +[13:00] Just to define the semantics u based. on what we have in classic. It should no on what we have in classic. It should no on what we have in classic. Furthermore, it should no way impact how we name things and so on way impact how we name things and so on. Way impact how we name things and so on. Now for the allow trust name. I do now for the allow trust name. Furthermore, I do know there is such operation. Furthermore, I'm not known there is such operation. Furthermore, I'm not sure. Sure. Sure, if our decisions for bindings should, be impacted by classic names, the two quite separate domains in my opinion, But again, I'm open to propositions of, how this could be mean better. I mean, create trust maybe is an option. I, I mean, create trust maybe is an option. Furthermore, I find it a bit weird, not sure what? AT&T, but I shouldn't AT&T. So allow trust in on classic only Let you change the trust line flags. + +[14:00] Let you change the trust line flags. right, like it doesn't actually let you create a new trust line. So it's a creation a new trust line. So it's a little confusing, AT&T- Sure, but does it really matter? AT&T. I think Le's point is just that you like AT&T. I think Le's point is just that you like AT&T. Furthermore, I think Le's point is just that you like, if you're already familiar with allow, trust, you might think like it's not clear what this function does. AT&T. Well, maybe, on this, rather that function. exists in, so I'm not sure if you should exist in, so I'm not sure if you should exist in. so I'm not sure if you should base naming on what we have on classic, and I would prefer to use whatever. name conveys what this exact function does, and all trust may by no means be a perfect name. I'm I'm just saying be a perfect name. Furthermore, I'm I'm just saying that like I wouldn't look back on classic operations, I would just name it classic operations. Furthermore, I would just name it in a way that is clear enough, Create trust is not very clear to me. but I know, maybe, if it's clear to, everyone. I find this is. + +[15:00] Yeah, I guess we can. and maybe discuss this. Fine, Yeah, we, can just call it like you know, Trust this, whatever like we can name it. in any way that is appropriate s I'm just not sure like classic is. necessary, necessary, necessary, but but but AT&T. Yeah, I think it's just like, yeah, we can AT&T. Yeah, I think it's just like, yeah, we can AT&T. Yeah, I think it's just like, yeah, we can talk about this offline. Yeah, we can have a completely unique name for sort, of when I'm planning with that. What I'm really interested in is if anyone has any thoughts on automated trust link, Creation versus a separate function, because we still have a separate, function. And I kind of mentioned my motivation around that. 
But I wonder if anyone disagrees with that motivation. and thinks that you know it's actually fine to just always try to create a trust line unconditionally, And if a developer her cares about the + +[16:00] And if a developer her cares about the scenario, just let them handle this on the client side. And if they don't care about the operation would fail in any case, whether we have a function or not. So, yeah, just curious if anyone has any thoughts, AT&T. But you're saying like in, like if, like a transfer, would automatically create a trust line like that scenario. Yeah, I feel like that would be a pretty. Yeah, I feel like that would be a pretty. Yeah, I feel like that would be a pretty big break from what we currently do I. I big break from what we currently do. I. Furthermore, I like I. I wouldn't be surprised if it is like I. I wouldn't be surprised if it is like I. I wouldn't be surprised if it would mess up a lot of like apps in would mess up a lot of like apps in would mess up a lot of like apps in the ecosystem. If suddenly you can create as many trust lines as you want, The other issue is accounts can only hold. AT&T. Sorry I. I must be clear. Like what I'm AT&T. Sorry I. I must be clear. Like what I'm AT&T. Sorry I. I must be clear. Like what I'm saying is allow trust function would be called unconditionally on transfers, so to speak, which means that we will still require authorization from the trust, line owner. It's not like we create trust lines without permission. We are. + +[17:00] Trust lines without permission. We are not changing semantics. It's just about not changing semantics. Furthermore, it's just about explicit versus implicit. And here we do this explicitly so that creators of the protocols, put this line into their code, and they kind of know what they are doing versus an implicit approach where we require us from the trust line owner, and if you handle this on the client side, somehow then you can actually create the trust, plane, given the receivers authorization So yeah, it is basically just about being more intentional of being making, this automated and then letting people figure this out on the client side, only on the client side. So yeah, but the authorization requirements do not. change, change, change AT&T. Okay, Yeah, I see what you're saying. + +[18:00] Matias? Which one do you think is better? Better than Which case is better than which case? Bit confused, I think. So are you comparing like the current? approach to the previous approach, in the cup, or are you comparing the explicit versus implicit approaches? + +[19:00] Somebody ask: could you please expand on? your point? Because I'm a bit lost in which experience do you think is better? in your opinion? Cannot hear you, I guess I can hear you. Furthermore, I guess I can hear you. AT&T. Can you guys hear me now? AT&T. Okay, I can hear you now. Yeah, AT&T. All right. Yeah, Push to talk always. keeps me All right. And now I'm saying: all I'm saying is: it's the CAP makes it. All I'm saying is: it's the CAP makes it. All I'm saying is: it's the CAP makes it easier to work with, there It's. It's an easier to work with there It's. It's a straightforward way to just create a G. account balance-right account balance, right AT&T. Oh yeah, definitely Yeah. AT&T Within the contract. So that that's what I'm saying, Instead of just having to. + +[20:00] I'm saying, Instead of just having to, build, you know the, the whole trust, lines on the outside, and then having that ready you have, then you're ready. 
to payment. This way, you just make your payment and done with AT&T. Oh yeah, of course. Yeah, I see a point. AT&T. Oh yeah, of course. Yeah, I see a point. AT&T. Oh yeah, of course. Yeah, I see a point. Like, if your feedback is generally about the cup. Yeah, totally, that's the idea. behind the cup. Is that we want to make behind the cup? Is that we want to make behind the cup? Is that we want to make it simpler to create trust lines in Saroyan or possible. And yeah, we only have some small technical things to work, out. But, yeah, thanks AT&T, thanks anyway. Yeah, AT&T okay, AT&T. It makes it very easy to interrupt with AT&T. It makes it very easy to interrupt with AT&T. It makes it very easy to interrupt with the classic site, the with the classic site. Yeah, Yeah, Yeah, AT&T, especially if you want to, transfer, if you're, if you have, if transferred, if you're, if you have is transferred, if you're, if you have, if you're set up on the server side, and then by any chance you have with a G, then by any chance you have with a G, then by any chance you have with a G account or create a G account to push forward payment, to say, or. + +[21:00] To push forward payment to say, or something this way, you AT&T, I, had a lot of interruptions, but, but I, you had a lot of interruptions, but, but I, you had a lot of interruptions, but I think I kind of got your, u idea. Yeah, definitely creating G accounts, like specifically creating G accounts, is definitely going to be very simple and yes, there's no friction there, So that's like the most simple thing to. do. Okay, Lee has left some feedback, as well. Yeah, exactly So, basically this point. is pretty much what I've been, thinking as well, that it is surprising if transfer requires more. signatures, signatures, signatures, and u yeah, there is a risk of users just. + +[22:00] And u? Yeah, there is a risk of users just and u? Yeah, there is a risk of users just not noticing it, whereas we allow trust, we only leave this functionality to the. protocols that can actually support this, meaningfully, which, as I've already mentioned, is probably just a fraction of the protocols. It's not something that everyone will be able to do, And of course, if ever, yeah, if in. the future, somehow trust domain, requirements are relaxed, then of course we'll revisit this, But for today, if we'll revisit this, But for today, it. seems like explicit is better than implicit. That's my opinion as well. So you folks are typing. Yeah, 8 to 20 rule. Yeah, I think I. Yeah, 8 to 20 rule. Yeah, I think I. Yeah, 8 to 20 rule. Yeah, I think it may be applied here. Yeah, I don't think everyone will need the trans, functionality. But for the excellent transfers: yeah, they'll work for everyone. So, yeah. + +[23:00] Yeah, exactly, We can always do this in. automated fashion, if it proves to be more useful, All right, If you have any feedback, the link to. the description has been posted above. and yeah, please feel free to leave. any additional feedback suggestions or concerns there. And thanks everyone I do not think we have anything else. Furthermore, I do not think we have anything else. Oh, actually, one small thing. Let me just drop a link. There is no CAP for this one. It's more of an announcement there is this get wedge host function. That no one on chain is actually using. So + +[24:00] No one on chain is actually using. So you may not even know about it and if you did not use it before, please do. If you did not use it before, please do. 
If you did not use it before, please do not start using it again, because what we want to do is to deprecate it and make want to do is to deprecate it and make want to do is to deprecate it and make it no longer work. Basically, just freeze it at some protocol version and the motivation is listed in the discussion itself, but basically the idea is that you cannot write a. contract that will benefit from this function. Like yeah, there is just really no way to use this function in really no way to. To use this function in really no way to. To use this function in a meaningful fashion and using it is likely a foot gun, which is why we decided that we want to clean this, up- there is no CAP for this yet, but up, there is no CAP for this yet, but up, there is no CAP for this yet. But you can imagine this will be a super. simple CAP, And yeah, we just open discussion a go to see if there is anyone that actually thinks that this is. + +[25:00] Actually thinks that this is actually thinking that this is something useful. But we are pretty sure yeah, there is no way to use it. appropriately. So we just deprecated it. And yeah again if you have any, comments for this one. Please feel free to leave on the discussion just an. announcement, basically to bring everyone's attention to this and now, I think that's pretty much it for today's meeting, So thanks everyone, and the next part. + +
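+
+:::note
+
+The sketch below is not part of the meeting; it is a minimal Python model of
+the CAP-73 semantics discussed above. The function names, signatures, and data
+structures are hypothetical stand-ins, not the actual SAC interface.
+
+:::
+
+```python
+BASE_RESERVE_XLM = 1  # a brand-new G account must be funded with the base reserve
+
+def transfer_xlm(accounts: dict, to: str, amount: int) -> None:
+    """Model of the XLM SAC transfer: a missing G account is auto-created."""
+    if to.startswith("G") and to not in accounts:
+        if amount < BASE_RESERVE_XLM:
+            # mirrors today's behavior: the transfer cannot cover the reserve
+            raise ValueError("transfer below base reserve cannot create the account")
+        accounts[to] = 0  # the account entry is created by the transfer itself
+    accounts[to] = accounts.get(to, 0) + amount
+
+def allow_trust(trustlines: set, address: str, asset: str, authorized: bool) -> None:
+    """Model of the proposed explicit trustline helper."""
+    if address.startswith("C"):
+        return  # C addresses hold contract balances, not trustlines: no-op
+    if (address, asset) in trustlines:
+        return  # the trustline already exists: no-op
+    if not authorized:
+        # the explicit design: auth must come from the trustline owner
+        raise PermissionError("creating a trustline requires auth from its owner")
+    # created with an effectively unlimited limit; per the discussion, only
+    # about 2% of trustlines carry a limit that is not effectively unlimited
+    trustlines.add((address, asset))
+```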
diff --git a/meetings/README.md b/meetings/README.md
new file mode 100644
index 0000000000..563274ff0c
--- /dev/null
+++ b/meetings/README.md
@@ -0,0 +1,75 @@
+# Developer Meetings
+
+This folder contains the meeting posts and a helper script to generate new
+meeting pages from a YouTube video ID (11 characters).
+
+## Script
+
+`meetings/new-meeting.py`
+
+What it does:
+
+- Downloads YouTube captions (VTT) via `yt_dlp`.
+- Builds a transcript with 1-minute blocks.
+- Adds punctuation and runs a spellcheck pass.
+- Creates a new `meetings/YYYY-MM-DD.mdx` page with front-matter and a YouTube
+  embed.
+
+What it doesn't do:
+
+- Draft a perfect description.
+- Add a helpful resources section.
+- Pull the actual meeting date when it differs from the video upload date.
+- Handle days when multiple meetings need to be combined into one page.
+
+### Requirements
+
+Python 3.9+ and a virtual environment.
+
+### Setup
+
+```bash
+python -m venv .venv
+.\.venv\Scripts\activate  # on macOS/Linux: source .venv/bin/activate
+pip install yt_dlp webvtt-py deepmultilingualpunctuation language_tool_python
+```
+
+Spellcheck requires Java (used by `language_tool_python`). If Java isn't
+installed, spellcheck is skipped with a warning.
+
+Java install (any platform):
+
+- Install a [recent JDK](https://www.oracle.com/java/technologies/downloads)
+  (Java 17+ recommended).
+- Make sure `java` is on your PATH (`java -version` should work).
+
+### Basic usage
+
+Run and answer the prompts:
+
+```bash
+python meetings/new-meeting.py
+```
+
+Or pass values directly:
+
+```bash
+python meetings/new-meeting.py \
+  --video VIDEO_ID \
+  --authors name-slug \
+  --tags developer
+```
+
+This will:
+
+- Download captions into memory (no `transcripts_out/` file by default)
+- Create `meetings/YYYY-MM-DD.mdx` using the YouTube upload date
+
+## Notes
+
+- `--authors` must match IDs in `meetings/authors.yml`.
+- Use `--no-create-page` to skip MDX generation and only export captions.
+- Use `--save-txt` to also write `transcripts_out/VIDEO_ID.en.txt`, or use
+  `--keep-vtt` to keep the raw closed-caption files.
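+
+The script accepts either a bare 11-character video ID or a full YouTube URL
+(watch or `youtu.be` form). For illustration, a self-contained sketch that
+mirrors the `extract_video_id` helper in `meetings/new-meeting.py`:
+
+```python
+import re
+
+# Same pattern the script uses to pull an ID out of a URL.
+YOUTUBE_ID_RE = re.compile(r"(?:v=|\/)([0-9A-Za-z_-]{11})(?:\?|&|\/|$)")
+
+def extract_video_id(url_or_id: str):
+    s = url_or_id.strip()
+    if re.fullmatch(r"[0-9A-Za-z_-]{11}", s):
+        return s  # already a bare video ID
+    m = YOUTUBE_ID_RE.search(s)
+    return m.group(1) if m else None
+
+# These all resolve to the same ID:
+assert extract_video_id("dQw4w9WgXcQ") == "dQw4w9WgXcQ"
+assert extract_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ") == "dQw4w9WgXcQ"
+assert extract_video_id("https://youtu.be/dQw4w9WgXcQ") == "dQw4w9WgXcQ"
+```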
diff --git a/meetings/authors.yml b/meetings/authors.yml new file mode 100644 index 0000000000..8180832e7c --- /dev/null +++ b/meetings/authors.yml @@ -0,0 +1,791 @@ +ada-vaughan: + name: Ada Vaughan + socials: + x: AdaVaughan + linkedin: adavaughan +alberto-chaves: + name: Alberto Chaves + title: CEO & Product Lead, Trustless Work + url: https://www.trustlesswork.com + image_url: https://github.com/techrebelgit.png + socials: + github: techrebelgit + x: TechRebelWorld + linkedin: alberto-chaves-costarica +alejo-mendoza: + name: Alejo Mendoza + url: https://github.com/alejomendoza + image_url: https://github.com/alejomendoza.png + socials: + github: alejomendoza + x: alejoskyhitz + linkedin: alejo-mendoza +alex-cordeiro: + name: Alex Cordeiro + title: VP Engineering + url: https://github.com/accordeiro + image_url: https://github.com/accordeiro.png + socials: + github: accordeiro + x: alexccordeiro + linkedin: alexccordeiro +alex-mootz: + name: Alex Mootz + url: https://github.com/mootz12 + image_url: https://github.com/mootz12.png + socials: + github: mootz12 + x: alexmootz + linkedin: alexjmootz +amilcar-erazo: + name: Amilcar Erazo + url: https://github.com/ajoseerazo + image_url: https://github.com/ajoseerazo.png + socials: + github: ajoseerazo + x: ajeraz0x + linkedin: amilcar-erazo-721a1a65 +amisha-singla: + name: Amisha Singla + title: Senior Software Engineer + url: https://bugbie.blogspot.com + image_url: https://github.com/amishas157.png + socials: + github: amishas157 + x: amishas157 + linkedin: amishas157 +amit-sharma: + name: Amit Sharma + socials: + x: ASharma_VT + linkedin: asharmavt +andy-wermke: + name: Andy Wermke + url: https://andywer.com + image_url: https://github.com/andywer.png + socials: + github: andywer + x: andywritescode + linkedin: awermke +anke-liu: + name: Anke Liu + title: Ecosystem Growth Lead + url: https://github.com/ankeliu + image_url: https://github.com/ankeliu.png + page: true + socials: + github: ankeliu + x: anke_g_liu + linkedin: ankeliu +anna-greenwald: + name: Anna Greenwald + socials: + x: AnnaGreenwald + linkedin: anna-greenwald-b898083 +anthony-barker: + name: Anthony Barker + url: https://github.com/antb123 + image_url: https://github.com/antb123.png + socials: + github: antb123 + x: anthony_barker + linkedin: anthony-barker-15288a +anuhya-challagundla: + name: Anuhya Challagundla + socials: + x: anuxhya + linkedin: a-chall +armen-ter-avetisyan: + name: Armen Ter‑Avetisyan + title: Technical Account Manager, Certora + url: https://github.com/teryanarmen + image_url: https://github.com/teryanarmen.png + socials: + github: teryanarmen + linkedin: armen-ter-avetisyan-92195a183 +bastian-koh: + name: Bastian Koh + url: https://bento.me/bastiankoh + image_url: https://github.com/alexanderkoh.png + socials: + github: alexanderkoh + x: bastiankoh + linkedin: bastiankoh +bri-wylde: + name: Bri Wylde + url: https://github.com/briwylde08 + image_url: https://github.com/briwylde08.png + socials: + github: briwylde08 + linkedin: briwylde +brieuc-berruet: + name: Brieuc Berruet + socials: + x: Briwind + linkedin: brieucberruet +carmen-hett: + name: Carmen Hett + socials: + linkedin: carmen-hett- +carsten-jacobsen: + name: Carsten Jacobsen + title: Senior Developer Advocate + url: https://github.com/carstenjacobsen + image_url: https://github.com/carstenjacobsen.png + page: true + socials: + github: carstenjacobsen + x: carstenjacobsen + linkedin: carstenjacobsendk +chad-ostrowski: + name: Chad Ostrowski + title: CEO, Aha Labs + url: 
https://stellar.org/blog/foundation-news/building-smart-contracts-faster-talking-efficient-development-w-aha-labs-co-founder-chad-o + image_url: https://github.com/chadoh.png + socials: + github: chadoh + x: chadoh + linkedin: chadoh +chandrakana-nandi: + name: Chandrakana Nandi + title: Director of US R&D, Certora + url: https://cnandi.com + image_url: https://github.com/chandrakananandi.png + socials: + github: chandrakananandi + x: ChandrakanaNaN + linkedin: chandrakana-nandi-50358261 +chris-anatalio: + name: Chris Anatalio + title: Senior Developer Advocate + url: https://github.com/anataliocs + image_url: https://github.com/anataliocs.png + socials: + github: anataliocs + x: canatalio + linkedin: anataliocs +dante-disparte: + name: Dante Disparte + socials: + x: ddisparte + linkedin: dantedisparte +david-mazieres: + name: David Mazières + title: Chief Scientist + url: https://github.com/stanford-scs + image_url: https://github.com/stanford-scs.png + page: true + socials: + github: stanford-scs + x: dmazieres + linkedin: david-mazières-20a92924 +debnil-sur: + name: Debnil Sur + url: https://github.com/debnil + image_url: https://github.com/debnil.png + socials: + github: debnil + x: debnilsur + linkedin: debnilsur +denelle-dixon: + name: Denelle Dixon + title: CEO and Executive Director + socials: + x: DenelleDixon + linkedin: denelle-dixon-967a236 +diana-mejia: + name: Diana Mejía + socials: + x: diana_mejia5 + linkedin: diana-margarita-mejia-27a463 +diego-yanez: + name: Diego Yanez + url: https://stellar.org/blog/ecosystem/scf-spotlight-diego-yanez-of-alfred + socials: + x: DiegoYDice + linkedin: diegoyanez1 +dmytro-kozhevin: + name: Dmytro Kozhevin + url: https://github.com/dmkozh + image_url: https://github.com/dmkozh.png + socials: + github: dmkozh + linkedin: dmytro-kozhevin-7433b834 +earrietadev: + name: Enrique A. 
+ url: https://github.com/earrietadev + image_url: https://github.com/earrietadev.png + socials: + github: earrietadev + x: earrietadev + linkedin: earrietadev +elliot-voris: + name: Elliot Voris + title: Senior Developer Advocate + url: https://github.com/ElliotFriend + image_url: https://github.com/ElliotFriend.png + page: true + socials: + github: ElliotFriend + x: ElliotFriend + linkedin: elliotfriend +emir-ayral: + name: Emir Ayral + socials: + x: StellarEmir + linkedin: emirayral +erasmus-hagen: + name: Erasmus Hagen + socials: + github: erasmus + x: specksnydern + linkedin: erasmushagen +eric-saunders: + name: Eric Saunders + title: Director of Engineering + url: https://github.com/ire-and-curses + image_url: https://github.com/ire-and-curses.png + socials: + github: ire-and-curses + linkedin: eric-saunders +ernesto-contreras: + name: Ernesto Contreras + socials: + x: ernestocontrer + linkedin: contrerasernesto +esteban-iglesias: + name: Esteban Iglesias + url: https://estebanweb.cl + image_url: https://github.com/esteblock.png + socials: + github: esteblock + x: esteblock_dev + linkedin: esteban-iglesias +fernando-castillo: + name: Fernando Castillo + url: https://github.com/fercastdev + image_url: https://github.com/fercastdev.png + socials: + github: fercastdev + x: fscastil + linkedin: fernando-castillo-97b390120 +fifo-fazzatti: + name: Fifo Fazzatti + title: Founder, Stellar Plus + url: https://github.com/fazzatti + image_url: https://github.com/fazzatti.png + socials: + github: fazzatti +francisco-catrileo: + name: Francisco Catrileo + url: https://github.com/chopan123 + image_url: https://github.com/chopan123.png + socials: + github: chopan123 + linkedin: francisco-catrileo-3908646a +fred-rezeau: + name: Frederic Rezeau + title: Founder, Litemint + url: https://kyungj.in + image_url: https://github.com/fredericrezeau.png + socials: + github: FredericRezeau + x: FredericRezeau + linkedin: fredericrezeau +gabriel-bizma: + name: Gabriel Bizma + socials: + x: BizamaGabriel + linkedin: gabriel-r-bizama +garand-tyson: + name: Garand Tyson + title: Senior Core Engineer + url: https://github.com/sirtyson + image_url: https://github.com/sirtyson.png + page: true + socials: + github: sirtyson + x: gttyson + linkedin: garandtyson +gbubemi-agbeyegbe: + name: Gbubemi Agbeyegbe + socials: + linkedin: gbubemi-agbeyegbe-ab785912 +geoff-ramseyer: + name: Geoff Ramseyer + url: https://www.scs.stanford.edu/~geoff + image_url: https://github.com/gramseyer.png + socials: + github: gramseyer + x: GeoffRamseyer +george-kudrayvtsev: + name: George Kudrayvtsev + url: https://teapowered.dev + title: Senior Software Engineer + image_url: https://github.com/shaptic.png + socials: + github: shaptic + linkedin: george-kudrayvtsev-91624859 +ginger-baker: + name: Ginger Baker + title: SDF Board Member + socials: + x: gingerkbaker + linkedin: bakerginger +graydon-hoare: + name: Graydon Hoare + title: Principal Architect + url: https://github.com/graydon + image_url: https://github.com/graydon.png + socials: + github: graydon +ishan-singh: + name: Ishan Singh + url: https://github.com/NotIshanSingh + image_url: https://github.com/NotIshanSingh.png + socials: + github: NotIshanSingh + x: NotIshanSingh + linkedin: ishan-singh-886551291 +ivan-mudryj: + name: Iván Agustín Mudryj + socials: + x: ivanmudryj + linkedin: ivanmudryj +jake-kendall: + name: Jake Kendall + socials: + x: JakeKendall5 + linkedin: kendalljake +jake-urban: + name: Jake Urban + title: Senior Engineering Manager + url: 
https://github.com/jakeurban + image_url: https://github.com/jakeurban.png + page: true + socials: + github: jakeurban + x: jakeurban_ + linkedin: jakeurban +jane-wang: + name: Jane Wang + url: https://github.com/janewang + image_url: https://github.com/janewang.png + socials: + github: janewang + linkedin: janelwang +jay-geng: + name: Jay Geng + title: Senior Core Engineer + url: https://github.com/jayz22 + image_url: https://github.com/jayz22.png + socials: + github: jayz22 + linkedin: jayz22 +jed-mccaleb: + name: Jed McCaleb + title: Chief Architect + url: https://github.com/jedmccaleb + image_url: https://github.com/jedmccaleb.png + socials: + github: jedmccaleb + x: jedmccaleb + linkedin: jed-mccaleb-4052a4 +jeesun-kim: + name: Jeesun Kim + title: Senior Software Developer + url: https://codeandfood.com + image_url: https://github.com/jeesunikim.png + socials: + github: jeesunikim + x: codeandfood + linkedin: jeesunkim +john-rubisoff: + name: John Rubisoff + socials: + x: spintax2 + linkedin: johnrubisoff +john-wooten: + name: John Wooten + url: https://jfwooten4.com + image_url: https://github.com/JFWooten4.png + socials: + github: jfwooten4 + x: jfwooten4 + linkedin: jfwooten4 +johnny-goodnow: + name: Johnny Goodnow + url: https://github.com/theaeolianmachine + image_url: https://github.com/theaeolianmachine.png + socials: + linkedin: jgoodnow + github: theaeolianmachine +jonathan-jove: + name: Jonathan Jove + title: Principal Engineer + url: https://jonjove.com + image_url: https://github.com/jonjove.png + socials: + github: jonjove + x: jonathan_jove + linkedin: jonathan-jove-3271a020a +joseph-benson: + name: Joseph Benson-Aruna + url: https://www.josephbenson.me + socials: + linkedin: josephbensonaruna +julian-martinez: + name: Julian Martinez + url: https://github.com/Julian-dev28 + image_url: https://github.com/Julian-dev28.png + socials: + github: Julian-dev28 + x: j_dev28 + linkedin: julian-martinez28 +justin-rice: + name: Justin Rice + title: VP of Ecosystem + url: https://github.com/rice2000 + image_url: https://github.com/rice2000.png + page: true + socials: + github: rice2000 + x: jricetweets + linkedin: justin-rice-9b8942253 +justin-trollip: + name: Justin Trollip + url: https://github.com/HariSeldon23 + image_url: https://github.com/HariSeldon23.png + socials: + github: HariSeldon23 + x: jtrollip + linkedin: justintrollip +kalepail: + name: Tyler van der Hoeven + title: Developer Advocate Director + url: https://github.com/kalepail + image_url: https://github.com/kalepail.png + page: true + socials: + github: kalepail + x: kalepail + linkedin: tyvdh +karen-chang: + name: Karen Chang + socials: + linkedin: changkc +kolten-bergeron: + name: Kolten Bergeron + url: https://kolten.fyi + socials: + linkedin: kolten + x: 0xKolten +leigh-mcculloch: + name: Leigh McCulloch + title: Principal Software Engineer + url: https://leighm.cc + image_url: https://github.com/leighmcculloch.png + socials: + github: leighmcculloch + x: ___leigh___ + linkedin: leighmcculloch +lisa-nestor: + name: Lisa Nestor + socials: + x: nestorious828 + linkedin: lisacnestor +marta-lokhova: + name: Marta Lokhova + url: https://github.com/marta-lokhova + image_url: https://github.com/marta-lokhova.png + socials: + github: marta-lokhova +matias-wald: + name: Matias Wald + url: https://github.com/waldmatias + image_url: https://github.com/waldmatias.png + socials: + github: waldmatias + x: waldmatias + linkedin: waldmatias +mercedes-vila: + name: Mercedes Llados Vila + socials: + linkedin: mercedesmadanire 
+michael-feldstein: + name: Michael Feldstein + url: https://github.com/msfeldstein + image_url: https://github.com/msfeldstein.png + socials: + github: msfeldstein + x: msfeldstein + linkedin: msfeldstein +morgan-wilde: + name: Morgan Wilde + url: https://github.com/wildework + image_url: https://github.com/wildework.png + socials: + github: wildework + x: sorobandev +morley-zhi: + name: Morley Zhi + url: https://mo.rley.co + image_url: https://github.com/morleyzhi.png + socials: + github: morleyzhi + x: morley + linkedin: morleyzhi +naman-kumar: + name: Naman Kumar + title: Product Manager + url: https://github.com/namankumar + image_url: https://github.com/namankumar.png + socials: + github: namankumar +nando-vieira: + name: Nando Vieira + title: Senior Software Engineer + url: https://fnando.com + image_url: https://github.com/fnando.png + socials: + github: fnando + x: fnando + linkedin: fnando +nat-robinson: + name: Nat Robinson + socials: + linkedin: nat-robinson +nicolas-barry: + name: Nicolas Barry + title: Chief Technology Officer + url: https://keybase.io/monsieurnicolas + image_url: https://github.com/monsieurnicolas.png + socials: + github: monsieurnicolas + x: nicolassf + linkedin: nicolas-barry +nicole-adair: + name: Nicole Adair + title: Head of Developer Relations + socials: + linkedin: nicoleadair +nikhil-saraf: + name: Nikhil Saraf + url: https://github.com/nikhilsaraf + image_url: https://github.com/nikhilsaraf.png + socials: + github: nikhilsaraf + x: nikhilsaraf9 + linkedin: sarafnikhil +oleg-andreev: + name: Oleg Andreev + url: https://oleganza.com + image_url: https://github.com/oleganza.png + socials: + github: oleganza + x: oleganza + linkedin: oleganza +olufunto-boroffice: + name: Olufunto Boroffice + socials: + linkedin: olufunto-boroffice +opeyemi-woyemi: + name: Opeyemi Awoyemi + url: https://hello.cv/opeawo + socials: + x: opeawo + linkedin: opeawo +orbitlens: + name: OrbitLens + url: https://github.com/orbitlens + image_url: https://github.com/orbitlens.png + socials: + github: orbitlens + x: orbitlens +pamphile-roy: + name: Pamphile Roy + url: https://github.com/tupui + image_url: https://github.com/tupui.png + socials: + github: tupui + x: PamphileRoy + linkedin: tupui +paul-bellamy: + name: Paul Bellamy + url: https://paulbellamy.com + image_url: https://github.com/paulbellamy.png + socials: + github: paulbellamy + x: pyrhho +piyal-basu: + name: Piyal Basu + title: Product Manager, Freighter + url: https://github.com/piyalbasu + image_url: https://github.com/piyalbasu.png + socials: + github: piyalbasu + linkedin: piyal-basu-837163b +plamen-hristov: + name: Plamen Hristov + url: https://github.com/PlamenHristov + image_url: https://github.com/PlamenHristov.png + socials: + github: PlamenHristov + linkedin: hristovdeveloper +riad-wahby: + name: Riad Wahby + url: https://github.com/kwantam + image_url: https://github.com/kwantam.png + socials: + github: kwantam +rick-groothuizen: + name: Rick Groothuizen + url: https://github.com/grthzn + image_url: https://github.com/grthzn.png + socials: + github: grthzn + x: rgrthzn + linkedin: rick-groothuizen +roberto-durscki: + name: Roberto Durscki + socials: + linkedin: durscki +robin-olthuis: + name: Robin Olthuis + url: https://github.com/robeartt + image_url: https://github.com/robeartt.png + socials: + github: robeartt + linkedin: robin-olthuis-802194262 +rohit-sinha: + name: Rohit Sinha + url: https://rsinha.github.io + image_url: https://github.com/rsinha.png + socials: + github: rsinha + x: sinharo + 
    linkedin: rohit-sinha-0a0a906
+sam-sealey:
+  name: Sam Sealey
+  socials:
+    x: samconnerone
+    linkedin: samsealey
+shawn-reuland:
+  name: Shawn Reuland
+  title: Senior Software Engineer
+  url: https://github.com/sreuland
+  image_url: https://github.com/sreuland.png
+  socials:
+    github: sreuland
+    linkedin: shawn-reuland-71b697
+siddharth-suresh:
+  name: Siddharth Suresh
+  url: https://github.com/sisuresh
+  image_url: https://github.com/sisuresh.png
+  socials:
+    github: sisuresh
+simon-chow:
+  name: Simon Chow
+  url: https://github.com/chowbao
+  image_url: https://github.com/chowbao.png
+  socials:
+    github: chowbao
+steve-walker:
+  name: Steve Walker
+  url: https://stevewalker.net
+  socials:
+    x: stevewalkr
+    linkedin: stevejwalker
+sydney-wiseman:
+  name: Sydney Wiseman
+  title: Senior Engineering Manager
+  socials:
+    linkedin: syd-wiseman
+tamir-sen:
+  name: Tamir Sen
+  url: https://github.com/tamirms
+  image_url: https://github.com/tamirms.png
+  socials:
+    github: tamirms
+timothy-baker:
+  name: Timothy Baker
+  url: https://github.com/silence48
+  image_url: https://github.com/silence48.png
+  socials:
+    github: silence48
+    x: silence48
+    linkedin: tim4802
+tom-quisel:
+  name: Tom Quisel
+  url: https://tomquisel.com
+  image_url: https://github.com/tomquisel.png
+  socials:
+    github: tomquisel
+    x: tquisel
+    linkedin: tomquisel
+tomer-weller:
+  name: Tomer Weller
+  title: Chief Product Officer
+  url: https://github.com/tomerweller
+  image_url: https://github.com/tomerweller.png
+  page: true
+  socials:
+    github: tomerweller
+    x: tomerweller
+    linkedin: tomerweller
+tommaso-de-ponti:
+  name: Tommaso De Ponti
+  url: https://heytdep.github.io
+  image_url: https://github.com/heytdep.png
+  socials:
+    github: heytdep
+    x: heytdep
+tori-samples:
+  name: Tori Samples
+  socials:
+    x: tori_samples
+    linkedin: tori-samples-a0b85055
+tsachi-herman:
+  name: Tsachi Herman
+  url: https://github.com/tsachiherman
+  image_url: https://github.com/tsachiherman.png
+  socials:
+    github: tsachiherman
+    x: TsachiHerman
+    linkedin: tsachi
+willem-wyndham:
+  name: Willem Wyndham
+  title: CTO, Aha Labs
+  url: https://github.com/willemneal
+  image_url: https://github.com/willemneal.png
+  socials:
+    github: willemneal
+    x: willemneal
+    linkedin: willem-wyndham
+wiza-jalakasi:
+  name: Wiza Jalakasi
+  url: https://wiza.jalaka.si
+  image_url: https://github.com/wizaj.png
+  socials:
+    github: wizaj
+    x: wizaj
+    linkedin: wizaj
+wouter-arkink:
+  name: Wouter Arkink
+  url: https://github.com/wouter1986
+  image_url: https://github.com/wouter1986.png
+  socials:
+    github: wouter1986
+    x: pickingunicorns
+yan-michalevsky:
+  name: Yan Michalevsky
+  url: https://www.michalevsky.com
+  image_url: https://github.com/ymcrcat.png
+  socials:
+    github: ymcrcat
+    x: ymcrcat
+    linkedin: yanmichalevsky
+zachary-freundt:
+  name: Zachary Freundt
+ socials: + linkedin: zachary-freundt diff --git a/meetings/new-meeting.py b/meetings/new-meeting.py new file mode 100644 index 0000000000..39d3b7923c --- /dev/null +++ b/meetings/new-meeting.py @@ -0,0 +1,500 @@ +#!/usr/bin/env python3 +import argparse +import datetime as dt +import json +import pathlib +import re +import subprocess +import sys +from typing import Dict, Iterator, List, Optional, Tuple + +YOUTUBE_ID_RE = re.compile(r"(?:v=|\/)([0-9A-Za-z_-]{11})(?:\?|&|\/|$)") +VTT_TIME_RE = re.compile(r"^(\d{2}):(\d{2}):(\d{2})\.(\d{3})$") +_PUNCTUATION_MODEL = None +_LANGUAGE_TOOL = None +_STOPWORDS = { + "a", "an", "and", "are", "as", "at", "be", "but", "by", "for", "from", "has", "have", "he", "her", + "his", "how", "i", "if", "in", "is", "it", "its", "me", "my", "not", "of", "on", "or", "our", + "she", "so", "that", "the", "their", "them", "they", "this", "to", "up", "us", "we", "were", + "what", "when", "where", "which", "who", "will", "with", "you", "your", +} +_SUMMARY_KEYWORDS = { + "cap", "caps", "protocol", "upgrade", "validator", "ledger", "freeze", "ttl", "extension", + "host", "function", "functions", "address", "strkey", "soroban", "muxedaddress", + "network", "config", "vote", "voting", "contract", "contracts", "sdk", +} + +def extract_video_id(url_or_id: str) -> Optional[str]: + s = url_or_id.strip() + if len(s) == 11 and re.fullmatch(r"[0-9A-Za-z_-]{11}", s): + return s + m = YOUTUBE_ID_RE.search(s) + return m.group(1) if m else None + +def read_list(path: pathlib.Path) -> Iterator[str]: + with path.open(encoding="utf-8") as f: + for line in f: + clean = line.strip() + if clean and not clean.startswith("#"): + yield clean + +def vtt_to_text(vtt_path: pathlib.Path) -> str: + import webvtt # pip install webvtt-py + parts = [] + for cue in webvtt.read(str(vtt_path)): + parts.append(cue.text.strip()) + return "\n".join(filter(None, parts)) + "\n" + +def _parse_vtt_time(ts: str) -> float: + m = VTT_TIME_RE.match(ts) + if not m: + return 0.0 + hours, minutes, seconds, millis = (int(x) for x in m.groups()) + return hours * 3600 + minutes * 60 + seconds + (millis / 1000.0) + +def _clean_caption_text(text: str) -> str: + text = re.sub(r"<[^>]+>", "", text) + text = re.sub(r"AGT;+", "", text) + text = re.sub(r"\b(?:um+|uh+|erm+|hmm+|mm+)\b[,.]?", "", text, flags=re.IGNORECASE) + text = re.sub(r"\b(cap|sep|slp)s?\b", lambda m: m.group(1).upper() + ("s" if m.group(0).lower().endswith("s") else ""), text) + text = re.sub(r"\bprotocol\s+(\d+)\b", lambda m: f"Protocol {m.group(1)}", text, flags=re.IGNORECASE) + text = re.sub(r"\bstella?r\b", "Stellar", text, flags=re.IGNORECASE) + text = re.sub(r"\bopen\s*(?:zeppelin|zepplin|zepelin|rubin)\b", "OpenZeppelin", text, flags=re.IGNORECASE) + text = re.sub(r"\b(sorond|soron|soran|soroban|orb[áa]n|soro?b[oa]n)\b", "Soroban", text, flags=re.IGNORECASE) + text = text.replace("\n", " ") + text = re.sub(r"\s+", " ", text) + return text.strip() + +def _format_timestamp(seconds: int) -> str: + mins = seconds // 60 + secs = seconds % 60 + return f"{mins:02d}:{secs:02d}" + +def _yaml_escape(value: str) -> str: + return value.replace("\\", "\\\\").replace("\"", "\\\"") + +def _punctuate_text(text: str, enabled: bool) -> str: + if not enabled: + return text + try: + from deepmultilingualpunctuation import PunctuationModel # type: ignore + except Exception: + print("warning: deepmultilingualpunctuation not installed; skipping punctuation", file=sys.stderr) + return text + global _PUNCTUATION_MODEL + if _PUNCTUATION_MODEL is None: + try: + 
_PUNCTUATION_MODEL = PunctuationModel() + except Exception as exc: + print(f"warning: punctuation model failed; skipping punctuation ({exc})", file=sys.stderr) + return text + return _PUNCTUATION_MODEL.restore_punctuation(text) + +def _spellcheck_text(text: str, enabled: bool) -> str: + if not enabled: + return text + try: + import language_tool_python # type: ignore + except Exception: + print("warning: language_tool_python not installed; skipping spellcheck", file=sys.stderr) + return text + if len(text) > 12000: + print("warning: text too long for spellcheck; skipping", file=sys.stderr) + return text + global _LANGUAGE_TOOL + if _LANGUAGE_TOOL is None: + try: + _LANGUAGE_TOOL = language_tool_python.LanguageTool("en-US") + except Exception as exc: + print(f"warning: spellcheck init failed; skipping spellcheck ({exc})", file=sys.stderr) + return text + return _LANGUAGE_TOOL.correct(text) + +def _dedupe_repeated_phrases(text: str) -> str: + tokens = text.split() + if len(tokens) < 6: + return text + max_window = 8 + max_passes = 3 + for _ in range(max_passes): + i = 0 + changed = False + while i < len(tokens): + window = min(max_window, (len(tokens) - i) // 2) + matched = False + for n in range(window, 1, -1): + a = tokens[i:i + n] + b = tokens[i + n:i + 2 * n] + if len(b) < n: + continue + norm_a = [re.sub(r"^\W+|\W+$", "", t).lower() for t in a] + norm_b = [re.sub(r"^\W+|\W+$", "", t).lower() for t in b] + if norm_a == norm_b and any(norm_a): + del tokens[i + n:i + 2 * n] + changed = True + matched = True + break + if not matched: + i += 1 + if not changed: + break + return " ".join(tokens) + +def _split_sentences(text: str) -> List[str]: + parts = re.split(r"(?<=[.!?])\s+", text.strip()) + return [p.strip() for p in parts if p.strip()] + +def _tokenize(text: str) -> List[str]: + return [ + w.lower() + for w in re.findall(r"[A-Za-z0-9']+", text) + if len(w) > 2 and w.lower() not in _STOPWORDS + ] + +def _normalize_sentence(sentence: str) -> str: + sentence = re.sub(r"\s+", " ", sentence).strip() + if sentence and sentence[-1] not in ".!?": + sentence += "." + return sentence[0].upper() + sentence[1:] if sentence else sentence + +def _extract_summary_items(sentence: str) -> List[str]: + items: List[str] = [] + for token in re.findall(r"[A-Za-z0-9-]+", sentence): + lower = token.lower() + if re.fullmatch(r"CAP-?\d+", token, flags=re.IGNORECASE): + items.append(token.upper()) + continue + if token.isupper() and len(token) > 2: + items.append(token) + continue + if lower in _SUMMARY_KEYWORDS: + items.append(token) + continue + if token[0].isupper() and lower not in _STOPWORDS: + items.append(token) + seen = set() + deduped: List[str] = [] + for item in items: + key = item.lower() + if key in seen: + continue + seen.add(key) + deduped.append(item) + return deduped[:7] + +def _synthesize_point(sentence: str) -> str: + items = _extract_summary_items(sentence) + if items: + return f"Discussion focused on {', '.join(items)}." 
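+    # No salient items extracted: fall back to the lightly normalized sentence.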
+ return _normalize_sentence(sentence) + +def summarize_key_points(blocks: List[Tuple[int, str]], max_points: int) -> List[str]: + if not blocks or max_points <= 0: + return [] + + text = " ".join(block for _, block in blocks) + sentences = _split_sentences(text) + if not sentences: + return [] + + freqs: Dict[str, int] = {} + for sentence in sentences: + for token in _tokenize(sentence): + freqs[token] = freqs.get(token, 0) + 1 + + scored: List[Tuple[int, float, str, set]] = [] + for idx, sentence in enumerate(sentences): + lowered = sentence.lower() + filler_hits = len(re.findall(r"\b(yeah|okay|ok|um|uh|like|you know|sort of|kind of)\b", lowered)) + tokens = _tokenize(sentence) + if len(tokens) < 5 or len(tokens) > 30: + continue + if filler_hits >= 2: + continue + if len(sentence) < 50: + continue + score = sum(freqs.get(t, 0) for t in tokens) / max(len(tokens), 1) + keyword_bonus = sum(1 for t in tokens if t in _SUMMARY_KEYWORDS) * 0.6 + score += keyword_bonus + scored.append((idx, score, sentence, set(tokens))) + + if not scored: + fallback = _normalize_sentence(sentences[0]) + return [fallback] if fallback else [] + + scored.sort(key=lambda x: (-x[1], x[0])) + picked: List[str] = [] + picked_sets: List[set] = [] + picked_norm: set = set() + for _, _, sentence, token_set in scored: + if len(picked) >= max_points: + break + too_similar = False + for prev_set in picked_sets: + if not prev_set: + continue + overlap = len(prev_set & token_set) / max(len(prev_set | token_set), 1) + if overlap > 0.6: + too_similar = True + break + if too_similar: + continue + synthesized = _synthesize_point(sentence) + synth_norm = re.sub(r"\s+", " ", synthesized).strip().lower() + if synth_norm in picked_norm: + continue + picked.append(synthesized) + picked_norm.add(synth_norm) + picked_sets.append(token_set) + return picked + +def vtt_to_minute_blocks(vtt_path: pathlib.Path, block_seconds: int, punctuate: bool, spellcheck: bool) -> List[Tuple[int, str]]: + import webvtt # pip install webvtt-py + buckets: Dict[int, List[str]] = {} + for cue in webvtt.read(str(vtt_path)): + start_s = _parse_vtt_time(cue.start) + minute_bucket = int(start_s // block_seconds) * block_seconds + clean = _clean_caption_text(cue.text) + if not clean: + continue + items = buckets.setdefault(minute_bucket, []) + if not items or items[-1] != clean: + items.append(clean) + + blocks: List[Tuple[int, str]] = [] + last_norm: Optional[str] = None + for bucket in sorted(buckets.keys()): + joined = " ".join(buckets[bucket]).strip() + if not joined: + continue + joined = _punctuate_text(joined, punctuate) + joined = _spellcheck_text(joined, spellcheck) + joined = _dedupe_repeated_phrases(joined) + norm = re.sub(r"\s+", " ", joined).strip().lower() + if norm and norm == last_norm: + continue + blocks.append((bucket, joined)) + last_norm = norm + return blocks + +def fetch_metadata(video_id: str, args: argparse.Namespace) -> Dict[str, str]: + cmd = [ + sys.executable, + "-m", + "yt_dlp", + "--skip-download", + "--dump-single-json", + f"https://www.youtube.com/watch?v={video_id}", + ] + if args.cookies: + cmd.extend(["--cookies", args.cookies]) + if args.remote_components: + cmd.extend(["--remote-components", args.remote_components]) + if args.js_runtime: + cmd.extend(["--js-runtimes", args.js_runtime]) + if args.impersonate: + cmd.extend(["--impersonate", args.impersonate]) + proc = subprocess.run(cmd, capture_output=True, text=True) + if proc.returncode: + print(f"{video_id}: yt-dlp metadata failed ({proc.returncode})\n{proc.stderr}", 
file=sys.stderr)
+        return {}
+    try:
+        return json.loads(proc.stdout)
+    except Exception:
+        return {}
+
+def build_mdx(
+    video_id: str,
+    title: str,
+    description: str,
+    authors: List[str],
+    tags: List[str],
+    blocks: List[Tuple[int, str]],
+    summary_points: List[str],
+) -> str:
+    safe_title = _yaml_escape(title)
+    safe_description = _yaml_escape(description)
+    front_matter_lines = [
+        "---",
+        f"title: \"{safe_title}\"",
+        f"description: \"{safe_description}\"",
+        "authors:",
+    ]
+    for author in authors:
+        front_matter_lines.append(f"  - {author}")
+    front_matter_lines.append(f"tags: [{', '.join(tags)}]")
+    front_matter_lines.append("---")
+    front_matter = "\n".join(front_matter_lines)
+
+    transcript_lines = []
+    prev_ended_sentence = True
+    for offset, text in blocks:
+        cleaned_text = text
+        # If the previous block ended mid-sentence, start this one lowercase.
+        if cleaned_text and not prev_ended_sentence:
+            cleaned_text = cleaned_text[0].lower() + cleaned_text[1:]
+        transcript_lines.append(f"[{_format_timestamp(offset)}] {cleaned_text}")
+        transcript_lines.append("")
+        prev_ended_sentence = bool(re.search(r"[.!?]$", text.strip()))
+
+    transcript_body = "\n".join(transcript_lines).rstrip()
+    summary_block = ""
+    if summary_points:
+        summary_lines = [f"- {point}" for point in summary_points] + [""]
+        summary_block = "\n".join(summary_lines)
+    return (
+        f"{front_matter}\n\n"
+        "import YouTube from \"@site/src/components/YouTube\";\n\n"
+        f"<YouTube videoId=\"{video_id}\" />\n\n"
+        f"{summary_block}\n\n"
+        "<details>\n"
+        "  <summary>Video Transcript</summary>\n\n"
+        f"{transcript_body}\n\n"
+        "</details>
\n" + ) + +def fetch_captions(video_id: str, out_dir: pathlib.Path, args: argparse.Namespace) -> None: + out_dir.mkdir(parents=True, exist_ok=True) + cmd = [ + sys.executable, + "-m", + "yt_dlp", + "--skip-download", + "--write-subs", + "--write-auto-subs", + "--sub-langs", + args.lang, + "--sub-format", + "vtt", + "-o", + str(out_dir / f"{video_id}.%(ext)s"), + f"https://www.youtube.com/watch?v={video_id}", + ] + if args.cookies: + cmd.extend(["--cookies", args.cookies]) + if args.remote_components: + cmd.extend(["--remote-components", args.remote_components]) + if args.js_runtime: + cmd.extend(["--js-runtimes", args.js_runtime]) + if args.impersonate: + cmd.extend(["--impersonate", args.impersonate]) + + proc = subprocess.run(cmd, capture_output=True, text=True) + if proc.returncode: + print(f"{video_id}: yt-dlp failed ({proc.returncode})\n{proc.stderr}", file=sys.stderr) + return + + vtt_files = list(out_dir.glob(f"{video_id}*.vtt")) + if not vtt_files: + print(f"{video_id}: no captions", file=sys.stderr) + return + + vtt_path = max(vtt_files, key=lambda p: p.stat().st_mtime) + if not args.create_page or args.save_txt: + txt_path = out_dir / f"{video_id}.{args.lang}.txt" + txt_path.write_text(vtt_to_text(vtt_path), encoding="utf-8") + print(f"{video_id}: saved {txt_path}") + + if args.create_page: + if not args.authors: + args.authors = "carsten-jacobsen" + metadata = fetch_metadata(video_id, args) + title = args.title or metadata.get("title") or f"Meeting {video_id}" + description = args.description or metadata.get("description", "") + if description: + description = description.strip().replace("\n", " ") + description = re.sub(r"\s+", " ", description)[:180] + tags = [t.strip() for t in (args.tags or "developer").split(",") if t.strip()] + authors = [a.strip() for a in args.authors.split(",") if a.strip()] + + upload_date = metadata.get("upload_date") + date_str = args.date or (f"{upload_date[:4]}-{upload_date[4:6]}-{upload_date[6:]}" if upload_date else None) + if not date_str: + date_str = dt.date.today().isoformat() + + blocks = vtt_to_minute_blocks( + vtt_path, + block_seconds=args.block_seconds, + punctuate=args.punctuate, + spellcheck=args.spellcheck, + ) + summary_points = [] if args.no_summary else summarize_key_points(blocks, args.summary_points) + if (not args.description) and (not description) and summary_points: + description = summary_points[0] + description = description.strip().replace("\n", " ") + description = re.sub(r"\s+", " ", description)[:180] + if not description: + description = "Stellar developer meeting transcript." 
+ + meetings_dir = pathlib.Path(args.meetings_dir) + meetings_dir.mkdir(parents=True, exist_ok=True) + out_path = meetings_dir / f"{date_str}.mdx" + if out_path.exists() and not args.overwrite: + print(f"{video_id}: {out_path} exists (use --overwrite to replace)", file=sys.stderr) + return + out_path.write_text( + build_mdx(video_id, title, description, authors, tags, blocks, summary_points), + encoding="utf-8", + ) + print(f"{video_id}: wrote {out_path}") + + if args.create_page and not args.keep_vtt: + for vtt_file in vtt_files: + try: + vtt_file.unlink() + except OSError: + pass + try: + if out_dir.exists() and not any(out_dir.iterdir()): + out_dir.rmdir() + except OSError: + pass + +def main() -> None: + parser = argparse.ArgumentParser(description="Export YouTube captions to text") + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument("--video", help="YouTube URL or ID") + group.add_argument("--list", help="File with one URL/ID per line") + parser.add_argument("--lang", default="en") + parser.add_argument("--out", default="transcripts_out") + parser.add_argument("--cookies", help="Path to cookies.txt for authenticated captions") + parser.add_argument("--remote-components") + parser.add_argument("--js-runtime") + parser.add_argument("--impersonate") + parser.add_argument("--create-page", action="store_true", default=True, help="Create meetings mdx page with transcript") + parser.add_argument("--no-create-page", action="store_false", dest="create_page", help="Only export captions to text") + parser.add_argument("--save-txt", action="store_true", help="Also save a plain text transcript") + parser.add_argument("--keep-vtt", action="store_true", help="Keep downloaded VTT files") + parser.add_argument("--meetings-dir", default="meetings") + parser.add_argument("--title") + parser.add_argument("--description") + parser.add_argument("--authors", help="Comma-separated author ids from meetings/authors.yml") + parser.add_argument("--tags", help="Comma-separated tags (default: developer)") + parser.add_argument("--date", help="YYYY-MM-DD; defaults to upload date") + parser.add_argument("--overwrite", action="store_true") + parser.add_argument("--block-seconds", type=int, default=60) + parser.add_argument("--punctuate", action="store_true", default=True) + parser.add_argument("--no-punctuate", action="store_false", dest="punctuate") + parser.add_argument("--spellcheck", action="store_true", default=True) + parser.add_argument("--no-spellcheck", action="store_false", dest="spellcheck") + parser.add_argument("--summary-points", type=int, default=4, help="Number of summary bullets to include") + parser.add_argument("--no-summary", action="store_true", help="Skip summary generation") + args = parser.parse_args() + + if not args.video and not args.list: + args.video = input("YouTube video ID or URL: ").strip() or None + if args.create_page and not args.authors: + args.authors = input("Authors (comma-separated, from authors.yml): ").strip() or None + if args.create_page and not args.tags: + args.tags = input("Tags (comma-separated, default: developer): ").strip() or "developer" + + inputs = [args.video] if args.video else list(read_list(pathlib.Path(args.list))) + video_ids = [vid for vid in (extract_video_id(item) for item in inputs) if vid] + if not video_ids: + parser.error("no valid video IDs found") + + out_dir = pathlib.Path(args.out) + for vid in video_ids: + fetch_captions(vid, out_dir, args) + +if __name__ == "__main__": + main() diff --git 
a/nginx/includes/redirects.conf b/nginx/includes/redirects.conf index f01e71506d..2695c8e659 100644 --- a/nginx/includes/redirects.conf +++ b/nginx/includes/redirects.conf @@ -287,3 +287,7 @@ rewrite ^/docs/build/guides/basics/send-and-receive-payments$ "/docs/build/guide rewrite ^/docs/learn/fundamentals/contract-development/events$ "/docs/learn/fundamentals/stellar-data-structures/events" permanent; rewrite ^/docs/learn/encyclopedia$ /docs/learn/fundamentals permanent; + +# Meetings: old slug formats +rewrite ^/meeting-notes/([0-9]{4})-([0-9]{2})-([0-9]{2})/?$ "/meetings/$1/$2/$3" permanent; +rewrite ^/meeting-notes(.*)$ "/meetings$1" permanent; diff --git a/openrpc/src/anchor-platform/methods/do_stellar_payment.json b/openrpc/src/anchor-platform/methods/do_stellar_payment.json index 4fef7e64cb..cf12887ed9 100644 --- a/openrpc/src/anchor-platform/methods/do_stellar_payment.json +++ b/openrpc/src/anchor-platform/methods/do_stellar_payment.json @@ -1,7 +1,7 @@ { "name": "do_stellar_payment", "summary": "Submits a Stellar payment", - "description": "Submits a payment to a stellar network by a custody service.", + "description": "Submits a payment to the Stellar network by a custody service.", "paramStructure": "by-name", "tags": [ { "name": "SEP-6" }, diff --git a/openrpc/src/anchor-platform/methods/do_stellar_refund.json b/openrpc/src/anchor-platform/methods/do_stellar_refund.json index e20a9cbc06..1e7a87d6db 100644 --- a/openrpc/src/anchor-platform/methods/do_stellar_refund.json +++ b/openrpc/src/anchor-platform/methods/do_stellar_refund.json @@ -1,7 +1,7 @@ { "name": "do_stellar_refund", "summary": "Submits a Stellar refund", - "description": "Submits a refund payment to a stellar network by a custody service", + "description": "Submits a refund payment to the Stellar network by a custody service", "paramStructure": "by-name", "tags": [ { "name": "SEP-6" }, diff --git a/package.json b/package.json index 48494536af..90171258ff 100644 --- a/package.json +++ b/package.json @@ -19,10 +19,10 @@ "api": "yarn api:clean && yarn api:bundle && yarn api:gen", "write-translations": "docusaurus write-translations", "write-heading-ids": "docusaurus write-heading-ids", - "format:mdx": "prettier --config .prettierrc.js --write \"{docs,src/pages,meeting-notes}/**/*.{md,mdx}\"", - "ci-format:mdx": "prettier --config .prettierrc.js --write --log-level silent \"{docs,src/pages,meeting-notes}/**/*.{md,mdx}\"", - "check:mdx": "prettier --config .prettierrc.js -c \"{docs,src/pages,meeting-notes}/**/*.{md,mdx}\"", - "ci:mdx": "prettier --config .prettierrc.js \"{docs,src/pages,meeting-notes}/**/*.{md,mdx}\" -l --no-editorconfig", + "format:mdx": "prettier --config .prettierrc.js --write \"{docs,src/pages,meetings}/**/*.{md,mdx}\"", + "ci-format:mdx": "prettier --config .prettierrc.js --write --log-level silent \"{docs,src/pages,meetings}/**/*.{md,mdx}\"", + "check:mdx": "prettier --config .prettierrc.js -c \"{docs,src/pages,meetings}/**/*.{md,mdx}\"", + "ci:mdx": "prettier --config .prettierrc.js \"{docs,src/pages,meetings}/**/*.{md,mdx}\" -l --no-editorconfig", "diff:mdx": "yarn ci-format:mdx && git diff -- . 
':(exclude)package-lock.json' ':(exclude)package.json' ':(exclude)yarn.lock' | awk \"/diff --git/ {found=1} found {print}\"", "lint:fix": "eslint \"src/**/*.{js,jsx,ts,tsx}\" --fix", "lint": "eslint \"src/**/*.{js,jsx,ts,tsx}\"", @@ -83,8 +83,9 @@ "@eslint/js": "9.39.2", "@redocly/cli": "2.14.1", "@stellar/prettier-config": "^1.2.0", + "cross-env": "^10.1.0", "eslint": "9.39.2", - "glob":"^13.0.0", + "glob": "^13.0.0", "husky": "^9.1.7", "prettier": "3.7.4", "typescript": "5.9.3" diff --git a/patches/@docusaurus+plugin-google-gtag+3.9.2.patch b/patches/@docusaurus+plugin-google-gtag+3.9.2.patch new file mode 100644 index 0000000000..7bbfa6803c --- /dev/null +++ b/patches/@docusaurus+plugin-google-gtag+3.9.2.patch @@ -0,0 +1,14 @@ +diff --git a/node_modules/@docusaurus/plugin-google-gtag/lib/gtag.js b/node_modules/@docusaurus/plugin-google-gtag/lib/gtag.js +index dd152ea1..dd7a7914 100644 +--- a/node_modules/@docusaurus/plugin-google-gtag/lib/gtag.js ++++ b/node_modules/@docusaurus/plugin-google-gtag/lib/gtag.js +@@ -18,6 +18,9 @@ const clientModule = { + setTimeout(() => { + // Always refer to the variable on window in case it gets overridden + // elsewhere. ++ if (typeof window.gtag !== 'function') { ++ return; ++ } + window.gtag('set', 'page_path', location.pathname + location.search + location.hash); + window.gtag('event', 'page_view'); + }); diff --git a/patches/@stoplight+mosaic+1.53.4.patch b/patches/@stoplight+mosaic+1.53.4.patch new file mode 100644 index 0000000000..9ea7e665a1 --- /dev/null +++ b/patches/@stoplight+mosaic+1.53.4.patch @@ -0,0 +1,62 @@ +diff --git a/node_modules/@stoplight/mosaic/core.esm.js b/node_modules/@stoplight/mosaic/core.esm.js +index 1d4db8c..8b07522 100644 +--- a/node_modules/@stoplight/mosaic/core.esm.js ++++ b/node_modules/@stoplight/mosaic/core.esm.js +@@ -5645,6 +5645,9 @@ const memoryStorage = { + getItem: name => memoryDb[name], + setItem: (name, value) => { + memoryDb[name] = value; ++ }, ++ removeItem: name => { ++ delete memoryDb[name]; + } + }; + +@@ -5655,6 +5658,7 @@ const defaultMode = () => { + return JSON.parse(localStorage.getItem(THEME_STORAGE_KEY)).mode; + } catch (_a) {} + ++ if (typeof document === 'undefined') return DEFAULT_THEME_MODE; + const dataTheme = document.documentElement.getAttribute('data-theme'); + if (dataTheme) return dataTheme; + return DEFAULT_THEME_MODE; +@@ -5736,7 +5740,7 @@ const useThemeStore = create(persist(set => ({ + }), { + name: THEME_STORAGE_KEY, + version: 0, +- getStorage: () => typeof localStorage === 'undefined' ? memoryStorage : localStorage, ++ getStorage: () => typeof localStorage === 'undefined' || typeof localStorage.removeItem !== 'function' ? 
memoryStorage : localStorage, + // only remember the desired mode + serialize: ({ + state, +diff --git a/node_modules/@stoplight/mosaic/core.umd.js b/node_modules/@stoplight/mosaic/core.umd.js +index 6a21eba..a4c2cec 100644 +--- a/node_modules/@stoplight/mosaic/core.umd.js ++++ b/node_modules/@stoplight/mosaic/core.umd.js +@@ -5683,6 +5683,9 @@ + getItem: name => memoryDb[name], + setItem: (name, value) => { + memoryDb[name] = value; ++ }, ++ removeItem: name => { ++ delete memoryDb[name]; + } + }; + +@@ -5693,6 +5696,7 @@ + return JSON.parse(localStorage.getItem(THEME_STORAGE_KEY)).mode; + } catch (_a) {} + ++ if (typeof document === 'undefined') return DEFAULT_THEME_MODE; + const dataTheme = document.documentElement.getAttribute('data-theme'); + if (dataTheme) return dataTheme; + return DEFAULT_THEME_MODE; +@@ -5774,7 +5778,7 @@ + }), { + name: THEME_STORAGE_KEY, + version: 0, +- getStorage: () => typeof localStorage === 'undefined' ? memoryStorage : localStorage, ++ getStorage: () => typeof localStorage === 'undefined' || typeof localStorage.removeItem !== 'function' ? memoryStorage : localStorage, + // only remember the desired mode + serialize: ({ + state, diff --git a/patches/zustand+3.7.2.patch b/patches/zustand+3.7.2.patch new file mode 100644 index 0000000000..cdb682f4f6 --- /dev/null +++ b/patches/zustand+3.7.2.patch @@ -0,0 +1,79 @@ +diff --git a/node_modules/zustand/middleware.js b/node_modules/zustand/middleware.js +index 540a2c11..7845315e 100644 +--- a/node_modules/zustand/middleware.js ++++ b/node_modules/zustand/middleware.js +@@ -330,7 +330,7 @@ var persist = function persist(config, baseOptions) { + set.apply(void 0, arguments); + }, get, api); + } else if (!storage.removeItem) { +- console.warn("[zustand persist middleware] The given storage for item '" + options.name + "' does not contain a 'removeItem' method, which will be required in v4."); ++ storage.removeItem = function () {}; + } + + var thenableSerialize = toThenable(options.serialize); +diff --git a/node_modules/zustand/esm/middleware.js b/node_modules/zustand/esm/middleware.js +index 9018b426..5951b57a 100644 +--- a/node_modules/zustand/esm/middleware.js ++++ b/node_modules/zustand/esm/middleware.js +@@ -264,7 +264,7 @@ const persist = (config, baseOptions) => (set, get, api) => { + set(...args); + }, get, api); + } else if (!storage.removeItem) { +- console.warn(`[zustand persist middleware] The given storage for item '${options.name}' does not contain a 'removeItem' method, which will be required in v4.`); ++ storage.removeItem = () => {}; + } + const thenableSerialize = toThenable(options.serialize); + const setItem = () => { +diff --git a/node_modules/zustand/esm/middleware.mjs b/node_modules/zustand/esm/middleware.mjs +index 9018b426..5951b57a 100644 +--- a/node_modules/zustand/esm/middleware.mjs ++++ b/node_modules/zustand/esm/middleware.mjs +@@ -264,7 +264,7 @@ const persist = (config, baseOptions) => (set, get, api) => { + set(...args); + }, get, api); + } else if (!storage.removeItem) { +- console.warn(`[zustand persist middleware] The given storage for item '${options.name}' does not contain a 'removeItem' method, which will be required in v4.`); ++ storage.removeItem = () => {}; + } + const thenableSerialize = toThenable(options.serialize); + const setItem = () => { +diff --git a/node_modules/zustand/system/middleware.development.js b/node_modules/zustand/system/middleware.development.js +index 3822299b..334147d9 100644 +--- a/node_modules/zustand/system/middleware.development.js ++++ 
b/node_modules/zustand/system/middleware.development.js +@@ -271,7 +271,7 @@ System.register([], (function (exports) { + set(...args); + }, get, api); + } else if (!storage.removeItem) { +- console.warn(`[zustand persist middleware] The given storage for item '${options.name}' does not contain a 'removeItem' method, which will be required in v4.`); ++ storage.removeItem = () => {}; + } + const thenableSerialize = toThenable(options.serialize); + const setItem = () => { +diff --git a/node_modules/zustand/umd/middleware.production.js b/node_modules/zustand/umd/middleware.production.js +index 852d9387..876b613f 100644 +--- a/node_modules/zustand/umd/middleware.production.js ++++ b/node_modules/zustand/umd/middleware.production.js +@@ -1 +1 @@ +-!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports):"function"==typeof define&&define.amd?define(["exports"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self).zustandMiddleware={})}(this,(function(e){"use strict";function t(){return t=Object.assign||function(e){for(var t=1;tt in n?P(n,t,{enumerable:!0,configurable:!0,writable:!0,value:e}):n[t]=e,x=(n,t)=>{for(var e in t||(t={}))N.call(t,e)&&T(n,e,t[e]);if(E)for(var e of E(t))D.call(t,e)&&T(n,e,t[e]);return n};const C=O("redux",(n,t)=>(e,i,s)=>(s.dispatch=a=>(e(p=>n(p,a),!1,a),a),s.dispatchFromDevtools=!0,x({dispatch:(...a)=>s.dispatch(...a)},t)));function A(n,t){return(e,i,s)=>{var a;let p=!1;typeof t=="string"&&!p&&(console.warn("[zustand devtools middleware]: passing `name` as directly will be not allowed in next majorpass the `name` in an object `{ name: ... }` instead"),p=!0);const u=t===void 0?{name:void 0,anonymousActionType:void 0}:typeof t=="string"?{name:t}:t;typeof((a=u==null?void 0:u.serialize)==null?void 0:a.options)<"u"&&console.warn("[zustand devtools middleware]: `serialize.options` is deprecated, just use `serialize`");let f;try{f=window.__REDUX_DEVTOOLS_EXTENSION__||window.top.__REDUX_DEVTOOLS_EXTENSION__}catch{}if(!f)return n(e,i,s);let l=Object.create(f.connect(u)),v=!1;Object.defineProperty(s,"devtools",{get:()=>(v||(console.warn("[zustand devtools middleware] `devtools` property on the store is deprecated it will be removed in the next major.\nYou shouldn't interact with the extension directly. But in case you still want to you can patch `window.__REDUX_DEVTOOLS_EXTENSION__` directly"),v=!0),l),set:d=>{v||(console.warn("[zustand devtools middleware] `api.devtools` is deprecated, it will be removed in the next major.\nYou shouldn't interact with the extension directly. 
But in case you still want to you can patch `window.__REDUX_DEVTOOLS_EXTENSION__` directly"),v=!0),l=d}});let y=!1;Object.defineProperty(l,"prefix",{get:()=>(y||(console.warn("[zustand devtools middleware] along with `api.devtools`, `api.devtools.prefix` is deprecated.\nWe no longer prefix the actions/names"+u.name===void 0?", pass the `name` option to create a separate instance of devtools for each store.":", because the `name` option already creates a separate instance of devtools for each store."),y=!0),""),set:()=>{y||(console.warn("[zustand devtools middleware] along with `api.devtools`, `api.devtools.prefix` is deprecated.\nWe no longer prefix the actions/names"+u.name===void 0?", pass the `name` option to create a separate instance of devtools for each store.":", because the `name` option already creates a separate instance of devtools for each store."),y=!0)}});let h=!0;s.setState=(d,r,o)=>{e(d,r),h&&l.send(o===void 0?{type:u.anonymousActionType||"anonymous"}:typeof o=="string"?{type:o}:o,i())};const w=(...d)=>{const r=h;h=!1,e(...d),h=r},b=n(s.setState,i,s);if(l.init(b),s.dispatchFromDevtools&&typeof s.dispatch=="function"){let d=!1;const r=s.dispatch;s.dispatch=(...o)=>{o[0].type==="__setState"&&!d&&(console.warn('[zustand devtools middleware] "__setState" action type is reserved to set state from the devtools. Avoid using it.'),d=!0),r(...o)}}return l.subscribe(d=>{var r;switch(d.type){case"ACTION":if(typeof d.payload!="string"){console.error("[zustand devtools middleware] Unsupported action format");return}return _(d.payload,o=>{if(o.type==="__setState"){w(o.state);return}!s.dispatchFromDevtools||typeof s.dispatch=="function"&&s.dispatch(o)});case"DISPATCH":switch(d.payload.type){case"RESET":return w(b),l.init(s.getState());case"COMMIT":return l.init(s.getState());case"ROLLBACK":return _(d.state,o=>{w(o),l.init(s.getState())});case"JUMP_TO_STATE":case"JUMP_TO_ACTION":return _(d.state,o=>{w(o)});case"IMPORT_STATE":{const{nextLiftedState:o}=d.payload,c=(r=o.computedStates.slice(-1)[0])==null?void 0:r.state;if(!c)return;w(c),l.send(null,o);return}case"PAUSE_RECORDING":return h=!h}return}}),b}}const _=(n,t)=>{let e;try{e=JSON.parse(n)}catch(i){console.error("[zustand devtools middleware] Could not parse the received json",i)}e!==void 0&&t(e)},L=O("subscribeWithSelector",n=>(t,e,i)=>{const s=i.subscribe;return i.subscribe=(a,p,u)=>{let f=a;if(p){const l=(u==null?void 0:u.equalityFn)||Object.is;let v=a(i.getState());f=y=>{const h=a(y);if(!l(v,h)){const w=v;p(v=h,w)}},u!=null&&u.fireImmediately&&p(v,v)}return s(f)},n(t,e,i)}),k=O("combine",(n,t)=>(e,i,s)=>Object.assign({},n,t(e,i,s)));var R=Object.defineProperty,z=Object.getOwnPropertySymbols,U=Object.prototype.hasOwnProperty,X=Object.prototype.propertyIsEnumerable,j=(n,t,e)=>t in n?R(n,t,{enumerable:!0,configurable:!0,writable:!0,value:e}):n[t]=e,g=(n,t)=>{for(var e in t||(t={}))U.call(t,e)&&j(n,e,t[e]);if(z)for(var e of z(t))X.call(t,e)&&j(n,e,t[e]);return n};const S=n=>t=>{try{const e=n(t);return e instanceof Promise?e:{then(i){return S(i)(e)},catch(i){return this}}}catch(e){return{then(i){return this},catch(i){return S(i)(e)}}}},F=O("persist",(n,t)=>(e,i,s)=>{let a=g({getStorage:()=>localStorage,serialize:JSON.stringify,deserialize:JSON.parse,partialize:r=>r,version:0,merge:(r,o)=>g(g({},o),r)},t);(a.blacklist||a.whitelist)&&console.warn(`The ${a.blacklist?"blacklist":"whitelist"} option is deprecated and will be removed in the next version. 
Please use the 'partialize' option instead.`);let p=!1;const u=new Set,f=new Set;let l;try{l=a.getStorage()}catch{}if(l)l.removeItem||console.warn(`[zustand persist middleware] The given storage for item '${a.name}' does not contain a 'removeItem' method, which will be required in v4.`);else return n((...r)=>{console.warn(`[zustand persist middleware] Unable to update item '${a.name}', the given storage is currently unavailable.`),e(...r)},i,s);const v=S(a.serialize),y=()=>{const r=a.partialize(g({},i()));a.whitelist&&Object.keys(r).forEach(m=>{var I;!((I=a.whitelist)!=null&&I.includes(m))&&delete r[m]}),a.blacklist&&a.blacklist.forEach(m=>delete r[m]);let o;const c=v({state:r,version:a.version}).then(m=>l.setItem(a.name,m)).catch(m=>{o=m});if(o)throw o;return c},h=s.setState;s.setState=(r,o)=>{h(r,o),y()};const w=n((...r)=>{e(...r),y()},i,s);let b;const d=()=>{var r;if(!l)return;p=!1,u.forEach(c=>c(i()));const o=((r=a.onRehydrateStorage)==null?void 0:r.call(a,i()))||void 0;return S(l.getItem.bind(l))(a.name).then(c=>{if(c)return a.deserialize(c)}).then(c=>{if(c)if(typeof c.version=="number"&&c.version!==a.version){if(a.migrate)return a.migrate(c.state,c.version);console.error("State loaded from storage couldn't be migrated since no migrate function was provided")}else return c.state}).then(c=>{var m;return b=a.merge(c,(m=i())!=null?m:w),e(b,!0),y()}).then(()=>{o==null||o(b,void 0),p=!0,f.forEach(c=>c(b))}).catch(c=>{o==null||o(void 0,c)})};return s.persist={setOptions:r=>{a=g(g({},a),r),r.getStorage&&(l=r.getStorage())},clearStorage:()=>{var r;(r=l==null?void 0:l.removeItem)==null||r.call(l,a.name)},rehydrate:()=>d(),hasHydrated:()=>p,onHydrate:r=>(u.add(r),()=>{u.delete(r)}),onFinishHydration:r=>(f.add(r),()=>{f.delete(r)})},d(),b||w})}}}); ++System.register([],function(O){"use strict";return{execute:function(){O("devtools",A);var P=Object.defineProperty,E=Object.getOwnPropertySymbols,N=Object.prototype.hasOwnProperty,D=Object.prototype.propertyIsEnumerable,T=(n,t,e)=>t in n?P(n,t,{enumerable:!0,configurable:!0,writable:!0,value:e}):n[t]=e,x=(n,t)=>{for(var e in t||(t={}))N.call(t,e)&&T(n,e,t[e]);if(E)for(var e of E(t))D.call(t,e)&&T(n,e,t[e]);return n};const C=O("redux",(n,t)=>(e,i,s)=>(s.dispatch=a=>(e(p=>n(p,a),!1,a),a),s.dispatchFromDevtools=!0,x({dispatch:(...a)=>s.dispatch(...a)},t)));function A(n,t){return(e,i,s)=>{var a;let p=!1;typeof t=="string"&&!p&&(console.warn("[zustand devtools middleware]: passing `name` as directly will be not allowed in next majorpass the `name` in an object `{ name: ... }` instead"),p=!0);const u=t===void 0?{name:void 0,anonymousActionType:void 0}:typeof t=="string"?{name:t}:t;typeof((a=u==null?void 0:u.serialize)==null?void 0:a.options)<"u"&&console.warn("[zustand devtools middleware]: `serialize.options` is deprecated, just use `serialize`");let f;try{f=window.__REDUX_DEVTOOLS_EXTENSION__||window.top.__REDUX_DEVTOOLS_EXTENSION__}catch{}if(!f)return n(e,i,s);let l=Object.create(f.connect(u)),v=!1;Object.defineProperty(s,"devtools",{get:()=>(v||(console.warn("[zustand devtools middleware] `devtools` property on the store is deprecated it will be removed in the next major.\nYou shouldn't interact with the extension directly. But in case you still want to you can patch `window.__REDUX_DEVTOOLS_EXTENSION__` directly"),v=!0),l),set:d=>{v||(console.warn("[zustand devtools middleware] `api.devtools` is deprecated, it will be removed in the next major.\nYou shouldn't interact with the extension directly. 
But in case you still want to you can patch `window.__REDUX_DEVTOOLS_EXTENSION__` directly"),v=!0),l=d}});let y=!1;Object.defineProperty(l,"prefix",{get:()=>(y||(console.warn("[zustand devtools middleware] along with `api.devtools`, `api.devtools.prefix` is deprecated.\nWe no longer prefix the actions/names"+u.name===void 0?", pass the `name` option to create a separate instance of devtools for each store.":", because the `name` option already creates a separate instance of devtools for each store."),y=!0),""),set:()=>{y||(console.warn("[zustand devtools middleware] along with `api.devtools`, `api.devtools.prefix` is deprecated.\nWe no longer prefix the actions/names"+u.name===void 0?", pass the `name` option to create a separate instance of devtools for each store.":", because the `name` option already creates a separate instance of devtools for each store."),y=!0)}});let h=!0;s.setState=(d,r,o)=>{e(d,r),h&&l.send(o===void 0?{type:u.anonymousActionType||"anonymous"}:typeof o=="string"?{type:o}:o,i())};const w=(...d)=>{const r=h;h=!1,e(...d),h=r},b=n(s.setState,i,s);if(l.init(b),s.dispatchFromDevtools&&typeof s.dispatch=="function"){let d=!1;const r=s.dispatch;s.dispatch=(...o)=>{o[0].type==="__setState"&&!d&&(console.warn('[zustand devtools middleware] "__setState" action type is reserved to set state from the devtools. Avoid using it.'),d=!0),r(...o)}}return l.subscribe(d=>{var r;switch(d.type){case"ACTION":if(typeof d.payload!="string"){console.error("[zustand devtools middleware] Unsupported action format");return}return _(d.payload,o=>{if(o.type==="__setState"){w(o.state);return}!s.dispatchFromDevtools||typeof s.dispatch=="function"&&s.dispatch(o)});case"DISPATCH":switch(d.payload.type){case"RESET":return w(b),l.init(s.getState());case"COMMIT":return l.init(s.getState());case"ROLLBACK":return _(d.state,o=>{w(o),l.init(s.getState())});case"JUMP_TO_STATE":case"JUMP_TO_ACTION":return _(d.state,o=>{w(o)});case"IMPORT_STATE":{const{nextLiftedState:o}=d.payload,c=(r=o.computedStates.slice(-1)[0])==null?void 0:r.state;if(!c)return;w(c),l.send(null,o);return}case"PAUSE_RECORDING":return h=!h}return}}),b}}const _=(n,t)=>{let e;try{e=JSON.parse(n)}catch(i){console.error("[zustand devtools middleware] Could not parse the received json",i)}e!==void 0&&t(e)},L=O("subscribeWithSelector",n=>(t,e,i)=>{const s=i.subscribe;return i.subscribe=(a,p,u)=>{let f=a;if(p){const l=(u==null?void 0:u.equalityFn)||Object.is;let v=a(i.getState());f=y=>{const h=a(y);if(!l(v,h)){const w=v;p(v=h,w)}},u!=null&&u.fireImmediately&&p(v,v)}return s(f)},n(t,e,i)}),k=O("combine",(n,t)=>(e,i,s)=>Object.assign({},n,t(e,i,s)));var R=Object.defineProperty,z=Object.getOwnPropertySymbols,U=Object.prototype.hasOwnProperty,X=Object.prototype.propertyIsEnumerable,j=(n,t,e)=>t in n?R(n,t,{enumerable:!0,configurable:!0,writable:!0,value:e}):n[t]=e,g=(n,t)=>{for(var e in t||(t={}))U.call(t,e)&&j(n,e,t[e]);if(z)for(var e of z(t))X.call(t,e)&&j(n,e,t[e]);return n};const S=n=>t=>{try{const e=n(t);return e instanceof Promise?e:{then(i){return S(i)(e)},catch(i){return this}}}catch(e){return{then(i){return this},catch(i){return S(i)(e)}}}},F=O("persist",(n,t)=>(e,i,s)=>{let a=g({getStorage:()=>localStorage,serialize:JSON.stringify,deserialize:JSON.parse,partialize:r=>r,version:0,merge:(r,o)=>g(g({},o),r)},t);(a.blacklist||a.whitelist)&&console.warn(`The ${a.blacklist?"blacklist":"whitelist"} option is deprecated and will be removed in the next version. 
Please use the 'partialize' option instead.`);let p=!1;const u=new Set,f=new Set;let l;try{l=a.getStorage()}catch{}if(l)l.removeItem||(l.removeItem=function(){});else return n((...r)=>{console.warn(`[zustand persist middleware] Unable to update item '${a.name}', the given storage is currently unavailable.`),e(...r)},i,s);const v=S(a.serialize),y=()=>{const r=a.partialize(g({},i()));a.whitelist&&Object.keys(r).forEach(m=>{var I;!((I=a.whitelist)!=null&&I.includes(m))&&delete r[m]}),a.blacklist&&a.blacklist.forEach(m=>delete r[m]);let o;const c=v({state:r,version:a.version}).then(m=>l.setItem(a.name,m)).catch(m=>{o=m});if(o)throw o;return c},h=s.setState;s.setState=(r,o)=>{h(r,o),y()};const w=n((...r)=>{e(...r),y()},i,s);let b;const d=()=>{var r;if(!l)return;p=!1,u.forEach(c=>c(i()));const o=((r=a.onRehydrateStorage)==null?void 0:r.call(a,i()))||void 0;return S(l.getItem.bind(l))(a.name).then(c=>{if(c)return a.deserialize(c)}).then(c=>{if(c)if(typeof c.version=="number"&&c.version!==a.version){if(a.migrate)return a.migrate(c.state,c.version);console.error("State loaded from storage couldn't be migrated since no migrate function was provided")}else return c.state}).then(c=>{var m;return b=a.merge(c,(m=i())!=null?m:w),e(b,!0),y()}).then(()=>{o==null||o(b,void 0),p=!0,f.forEach(c=>c(b))}).catch(c=>{o==null||o(void 0,c)})};return s.persist={setOptions:r=>{a=g(g({},a),r),r.getStorage&&(l=r.getStorage())},clearStorage:()=>{var r;(r=l==null?void 0:l.removeItem)==null||r.call(l,a.name)},rehydrate:()=>d(),hasHydrated:()=>p,onHydrate:r=>(u.add(r),()=>{u.delete(r)}),onFinishHydration:r=>(f.add(r),()=>{f.delete(r)})},d(),b||w})}}}); +diff --git a/node_modules/zustand/umd/middleware.development.js b/node_modules/zustand/umd/middleware.development.js +index 2f6ef5bf..6615e3bb 100644 +--- a/node_modules/zustand/umd/middleware.development.js ++++ b/node_modules/zustand/umd/middleware.development.js +@@ -332,7 +332,7 @@ + set.apply(void 0, arguments); + }, get, api); + } else if (!storage.removeItem) { +- console.warn("[zustand persist middleware] The given storage for item '" + options.name + "' does not contain a 'removeItem' method, which will be required in v4."); ++ storage.removeItem = function () {}; + } + + var thenableSerialize = toThenable(options.serialize); diff --git a/routes.txt b/routes.txt index 6fe9d03c98..661e1c8219 100644 --- a/routes.txt +++ b/routes.txt @@ -531,6 +531,7 @@ /docs/networks /docs/networks/resource-limits-fees /docs/networks/software-versions +/docs/notes /docs/platforms /docs/platforms/anchor-platform /docs/platforms/anchor-platform/admin-guide @@ -818,10 +819,85 @@ /docs/validators/admin-guide/soroban-settings /docs/validators/tier-1-orgs /meetings +/meetings/2019/01/24 +/meetings/2019/03/07 +/meetings/2019/03/14 +/meetings/2019/03/21 +/meetings/2019/03/28 +/meetings/2019/04/04 +/meetings/2019/06/13 +/meetings/2019/06/27 +/meetings/2019/07/25 +/meetings/2019/11/04 +/meetings/2020/04/10 +/meetings/2020/04/16 +/meetings/2020/04/24 +/meetings/2020/04/30 +/meetings/2020/05/08 +/meetings/2020/05/22 +/meetings/2020/06/04 +/meetings/2020/06/05 +/meetings/2020/06/26 +/meetings/2020/07/09 +/meetings/2020/07/10 +/meetings/2020/07/15 +/meetings/2020/07/24 +/meetings/2020/08/14 +/meetings/2020/08/28 +/meetings/2020/09/11 +/meetings/2020/10/09 +/meetings/2020/10/29 +/meetings/2021/01/14 +/meetings/2021/01/28 +/meetings/2021/03/11 +/meetings/2021/04/15 +/meetings/2021/04/22 +/meetings/2021/05/06 +/meetings/2021/06/17 +/meetings/2021/07/28 +/meetings/2021/07/29 +/meetings/2021/09/09 
+/meetings/2021/10/14
+/meetings/2021/10/28
+/meetings/2021/11/12
+/meetings/2021/11/19
+/meetings/2022/02/03
+/meetings/2022/02/17
+/meetings/2022/03/03
+/meetings/2022/03/11
+/meetings/2022/03/15
+/meetings/2022/04/28
+/meetings/2022/05/05
+/meetings/2022/05/12
+/meetings/2022/05/17
+/meetings/2022/05/19
+/meetings/2022/05/26
+/meetings/2022/06/02
+/meetings/2022/06/23
+/meetings/2022/07/01
+/meetings/2022/07/12
+/meetings/2022/08/31
+/meetings/2022/09/16
+/meetings/2022/11/17
+/meetings/2022/12/07
+/meetings/2022/12/12
+/meetings/2022/12/15
+/meetings/2022/12/22
+/meetings/2023/01/12
+/meetings/2023/02/23
+/meetings/2023/03/02
+/meetings/2023/03/09
+/meetings/2023/03/16
+/meetings/2023/03/30
+/meetings/2023/04/06
+/meetings/2023/04/13
+/meetings/2023/04/20
+/meetings/2023/04/27
+/meetings/2023/05/04
/meetings/2024/01/18
/meetings/2024/01/26
/meetings/2024/02/01
-/meetings/2024/02/09
+/meetings/2024/02/08
/meetings/2024/02/15
/meetings/2024/02/22
/meetings/2024/02/29
@@ -840,7 +916,9 @@
/meetings/2024/06/27
/meetings/2024/07/11
/meetings/2024/07/18
+/meetings/2024/07/24
/meetings/2024/07/25
+/meetings/2024/07/31
/meetings/2024/08/01
/meetings/2024/08/08
/meetings/2024/08/15
@@ -880,23 +958,146 @@
/meetings/2025/10/23
/meetings/2025/10/30
/meetings/2025/11/06
+/meetings/2026/01/22
+/meetings/2026/01/29
/meetings/archive
/meetings/authors
-/meetings/authors/carstenjacobsen
-/meetings/authors/carstenjacobsen/authors/2
-/meetings/authors/carstenjacobsen/authors/3
-/meetings/authors/elliotfriend
+/meetings/authors/anke-liu
+/meetings/authors/carsten-jacobsen
+/meetings/authors/carsten-jacobsen/authors/2
+/meetings/authors/carsten-jacobsen/authors/3
+/meetings/authors/david-mazieres
+/meetings/authors/david-mazieres/authors/2
+/meetings/authors/david-mazieres/authors/3
+/meetings/authors/elliot-voris
+/meetings/authors/garand-tyson
+/meetings/authors/jake-urban
+/meetings/authors/justin-rice
+/meetings/authors/justin-rice/authors/2
+/meetings/authors/justin-rice/authors/3
/meetings/authors/kalepail
+/meetings/authors/kalepail/authors/2
+/meetings/authors/tomer-weller
+/meetings/authors/tomer-weller/authors/2
+/meetings/authors/tomer-weller/authors/3
+/meetings/authors/tomer-weller/authors/4
+/meetings/page/10
+/meetings/page/11
+/meetings/page/12
/meetings/page/2
/meetings/page/3
/meetings/page/4
/meetings/page/5
/meetings/page/6
+/meetings/page/7
+/meetings/page/8
+/meetings/page/9
/meetings/tags
+/meetings/tags/cap-10
+/meetings/tags/cap-11
+/meetings/tags/cap-13
+/meetings/tags/cap-14
+/meetings/tags/cap-15
+/meetings/tags/cap-16
+/meetings/tags/cap-17
+/meetings/tags/cap-18
+/meetings/tags/cap-19
+/meetings/tags/cap-20
+/meetings/tags/cap-21
+/meetings/tags/cap-22
+/meetings/tags/cap-23
+/meetings/tags/cap-24
+/meetings/tags/cap-25
+/meetings/tags/cap-27
+/meetings/tags/cap-28
+/meetings/tags/cap-29
+/meetings/tags/cap-30
+/meetings/tags/cap-31
+/meetings/tags/cap-33
+/meetings/tags/cap-34
+/meetings/tags/cap-35
+/meetings/tags/cap-37
+/meetings/tags/cap-38
+/meetings/tags/cap-40
+/meetings/tags/cap-42
+/meetings/tags/cap-44
+/meetings/tags/cap-46
+/meetings/tags/cap-46-1
+/meetings/tags/cap-46-2
+/meetings/tags/cap-46-3
+/meetings/tags/cap-46-5
+/meetings/tags/cap-46-6
+/meetings/tags/cap-46-7
+/meetings/tags/cap-46-9
+/meetings/tags/cap-48
+/meetings/tags/cap-49
+/meetings/tags/cap-5
+/meetings/tags/cap-50
+/meetings/tags/cap-51
+/meetings/tags/cap-52
+/meetings/tags/cap-53
+/meetings/tags/cap-54
+/meetings/tags/cap-55
+/meetings/tags/cap-56
+/meetings/tags/cap-57
+/meetings/tags/cap-58
+/meetings/tags/cap-59
+/meetings/tags/cap-6
+/meetings/tags/cap-60
+/meetings/tags/cap-62
+/meetings/tags/cap-63
+/meetings/tags/cap-64
+/meetings/tags/cap-65
+/meetings/tags/cap-66
+/meetings/tags/cap-67
+/meetings/tags/cap-68
+/meetings/tags/cap-69
+/meetings/tags/cap-7
+/meetings/tags/cap-70
+/meetings/tags/cap-71
+/meetings/tags/cap-72
+/meetings/tags/cap-73
+/meetings/tags/cap-74
+/meetings/tags/cap-75
+/meetings/tags/cap-77
+/meetings/tags/cap-78
+/meetings/tags/cap-79
+/meetings/tags/cap-8
+/meetings/tags/cap-80
+/meetings/tags/cap-9
+/meetings/tags/community
+/meetings/tags/community/page/2
/meetings/tags/developer
/meetings/tags/developer/page/2
/meetings/tags/developer/page/3
/meetings/tags/developer/page/4
/meetings/tags/developer/page/5
-/meetings/tags/protocol
+/meetings/tags/legacy
+/meetings/tags/legacy/page/2
+/meetings/tags/legacy/page/3
+/meetings/tags/sep-1
+/meetings/tags/sep-10
+/meetings/tags/sep-12
+/meetings/tags/sep-13
+/meetings/tags/sep-16
+/meetings/tags/sep-17
+/meetings/tags/sep-19
+/meetings/tags/sep-24
+/meetings/tags/sep-26
+/meetings/tags/sep-3
+/meetings/tags/sep-30
+/meetings/tags/sep-31
+/meetings/tags/sep-40
+/meetings/tags/sep-41
+/meetings/tags/sep-42
+/meetings/tags/sep-43
+/meetings/tags/sep-5
+/meetings/tags/sep-6
+/meetings/tags/sep-8
+/meetings/tags/sep-9
+/meetings/tags/soroban
+/meetings/tags/soroban/page/2
+/meetings/tags/spotlight
+/meetings/tags/spotlight/page/2
+/meetings/tags/tutorial
/search
diff --git a/scripts/clean_transcripts.py b/scripts/clean_transcripts.py
new file mode 100644
index 0000000000..62ee72fa15
Binary files /dev/null and b/scripts/clean_transcripts.py differ
diff --git a/scripts/long_context_transcripts.py b/scripts/long_context_transcripts.py
new file mode 100644
index 0000000000..642021570e
Binary files /dev/null and b/scripts/long_context_transcripts.py differ
diff --git a/scripts/normalize_transcripts.py b/scripts/normalize_transcripts.py
new file mode 100644
index 0000000000..a7c7296fd9
Binary files /dev/null and b/scripts/normalize_transcripts.py differ
diff --git a/scripts/stellar_cli.mjs b/scripts/stellar_cli.mjs
index aac91cfd98..729d126199 100644
--- a/scripts/stellar_cli.mjs
+++ b/scripts/stellar_cli.mjs
@@ -49,7 +49,7 @@ const fullHelpDocsContent = fs.readFileSync(fullHelpDocsPath, "utf8");

const modifiedContent = `---
sidebar_position: 10
-description: This document contains the help content for the stellar command-line program.
+description: This document contains the help content for the Stellar command-line program.
---

${fullHelpDocsContent}
diff --git a/sitemap.xml b/sitemap.xml
new file mode 100644
index 0000000000..1ae72a7c9c
--- /dev/null
+++ b/sitemap.xml
@@ -0,0 +1,5655 @@
[5,655 added lines: one sitemap <url> entry per stellar.org page, each carrying a <loc> URL and, for blog, press, and event pages, a <lastmod> date]
https://stellar.org/community/events/soroban-developer-workshop-write-your-first-smart-contract-on-soroban + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-como-programar-transacciones-en-stellar + 2025-10-28 + + + + https://stellar.org/community/events/virtual-meetup-what-can-we-do-to-make-stellar-better + 2025-10-28 + + + + https://stellar.org/community/events/consensus-fireside-chat-ama-with-jed-mccaleb-on-financial-inclusion + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-16-upgrade-complete + 2025-10-28 + + + + https://stellar.org/community/events/chamber-of-digital-commerce-parallel-summit + 2025-10-28 + + + + https://stellar.org/blog/developers/the-principles-behind-protocol-design-amms-stellar + 2025-10-28 + + + + https://stellar.org/community/events/dacom-defi-2021-panel + 2025-10-28 + + + + https://stellar.org/community/events/understanding-cbdcs-a-policymakers-guide + 2025-10-28 + + + + https://stellar.org/community/events/the-state-of-international-remittance + 2025-10-28 + + + + https://stellar.org/community/events/blockchain-bootcamp-with-dfs-lab + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-crowdfunding-con-stellar-y-react + 2025-10-28 + + + + https://stellar.org/community/events/stellar-at-hacknyu + 2025-10-28 + + + + https://stellar.org/community/events/practical-path-payments + 2025-10-28 + + + + https://stellar.org/community/events/startup-camp-q2-2023 + 2025-10-28 + + + + https://stellar.org/community/events/how-to-succeed-in-the-stellar-community-fund + 2025-10-28 + + + + https://stellar.org/community/events/consumer-friendly-key-management-with-sep-30 + 2025-10-28 + + + + https://stellar.org/blog/developers/pull-mode-the-latest-initiative-to-increase-stellars-tps + 2025-10-28 + + + + https://stellar.org/blog/developers/how-to-validate-blockchain-code-with-stellar-supercluster + 2025-10-28 + + + + https://stellar.org/blog/developers/may-15th-network-halt + 2025-10-28 + + + + https://stellar.org/community/events/fintech-meetup + 2025-10-28 + + + + https://stellar.org/community/events/blockchain-camp-ecosistema-cripto-en-america-latina + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-q1-2021-review + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-q3-2021-review + 2025-10-28 + + + + https://stellar.org/community/events/kelp-gui + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-17-upgrade-complete + 2025-10-28 + + + + https://stellar.org/community/events/european-blockchain-convention-3 + 2025-10-28 + + + + https://stellar.org/blog/developers/surge-pricing-on-stellar-faq + 2025-10-28 + + + + https://stellar.org/community/events/continued-discussion-creating-a-stellar-ecosystem-standard-for-send-receive-transactions + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-q2-in-review + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-10-14-21 + 2025-10-28 + + + + https://stellar.org/blog/developers/elliot-voris-elliotfriend-developers-on-stellar + 2025-10-28 + + + + https://stellar.org/community/events/how-to-create-an-scf-submission-worth-exploring + 2025-10-28 + + + + https://stellar.org/community/events/two-sides-of-the-american-coin-innovation-regulation-of-digital-assets + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion + 2025-10-28 + + + + https://stellar.org/community/events/africa-tech-summit-panel + 2025-10-28 + + 
+ + https://stellar.org/community/events/ama-with-justin-rice + 2025-10-28 + + + + https://stellar.org/community/events/intuitive-stellar-consensus-protocol + 2025-10-28 + + + + https://stellar.org/community/events/webinar-how-to-use-stablecoins-in-crossborder-payments + 2025-10-28 + + + + https://stellar.org/community/events/whats-new-with-the-stellar-community-fund + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-18-upgrade-guide + 2025-10-28 + + + + https://stellar.org/community/events/women-building-in-blockchain-unique-perspectives-on-developing-products-and-businesses + 2025-10-28 + + + + https://stellar.org/community/events/stellar-quest-learn-stellar-win-prizes + 2025-10-28 + + + + https://stellar.org/blog/developers/action-required-new-lts-support-and-ubuntu-16-04-deprecation + 2025-10-28 + + + + https://stellar.org/community/events/how-to-build-an-anchor-with-polaris + 2025-10-28 + + + + https://stellar.org/community/events/gbbc-blockchain-central-unga-panel + 2025-10-28 + + + + https://stellar.org/community/events/stellar-austin-meetup + 2025-10-28 + + + + https://stellar.org/community/events/construyendo-el-futuro-del-sistema-financiero-con-blockchain + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-crea-tu-primer-token-o-ico-en-stellar + 2025-10-28 + + + + https://stellar.org/community/events/building-a-stellar-browser-extension + 2025-10-28 + + + + https://stellar.org/blog/developers/a-new-ticker-for-the-stellar-community + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-que-son-los-stellar-anchors + 2025-10-28 + + + + https://stellar.org/community/events/world-government-summit + 2025-10-28 + + + + https://stellar.org/community/events/blockchain-by-women-panel-ensuring-privacy-and-security-of-digital-assets-and-blockchain-apps + 2025-10-28 + + + + https://stellar.org/blog/developers/horizon-v0-22-0-released-protocol-12-support + 2025-10-28 + + + + https://stellar.org/community/events/chainalysis-links-the-future-of-currency + 2025-10-28 + + + + https://stellar.org/blog/developers/soroban-preview-release-2 + 2025-10-28 + + + + https://stellar.org/community/events/achieving-network-effects-on-an-open-network + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-como-crear-una-billetera-cripto-en-stellar-con-react + 2025-10-28 + + + + https://stellar.org/community/events/digital-currency-conference-panel + 2025-10-28 + + + + https://stellar.org/community/events/anchor-basics-the-business-and-benefits-of-anchors-on-the-stellar-network + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-6-17-21 + 2025-10-28 + + + + https://stellar.org/community/events/future-proof + 2025-10-28 + + + + https://stellar.org/blog/developers/a-new-sun-on-the-horizon + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-que-son-los-stellar-tools + 2025-10-28 + + + + https://stellar.org/blog/developers/a-new-go-sdk-for-the-stellar-network + 2025-10-28 + + + + https://stellar.org/community/events/stellar-virtual-meetup-september-2020 + 2025-10-28 + + + + https://stellar.org/blog/developers/failed-transaction-mitigation-faq + 2025-10-28 + + + + https://stellar.org/community/events/stellar-live-dec-3-20 + 2025-10-28 + + + + https://stellar.org/community/events/interop-summit + 2025-10-28 + + + + https://stellar.org/community/events/tiecon-2020-blockchain-for-social-impact + 2025-10-28 + + + + https://stellar.org/blog/developers/stellar-core-upgrading-to-ubuntu-22-04 + 2025-10-28 + + + + 
https://stellar.org/blog/developers/behind-the-scenes-with-speedex + 2025-10-28 + + + + https://stellar.org/community/events/crypto-leaders-symposium + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-06-09-22 + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-17-upgrade-guide + 2025-10-28 + + + + https://stellar.org/community/events/stellar-nft-hackathon + 2025-10-28 + + + + https://stellar.org/community/events/fintech-in-africa + 2025-10-28 + + + + https://stellar.org/community/events/consensus-2022 + 2025-10-28 + + + + https://stellar.org/community/events/stellar-meetup-munich-christmas-party + 2025-10-28 + + + + https://stellar.org/blog/developers/our-new-horizon-ingestion-engine + 2025-10-28 + + + + https://stellar.org/community/events/crea-tu-primer-transaccion-con-stellar + 2025-10-28 + + + + https://stellar.org/community/events/meridian-2019 + 2025-10-28 + + + + https://stellar.org/blog/developers/intuitive-stellar-consensus-protocol + 2025-10-28 + + + + https://stellar.org/community/events/imf-world-bank-annual-meetings-cross-border-payments-the-private-sector-steps-up + 2025-10-28 + + + + https://stellar.org/community/events/stellar-en-pocas-palabras + 2025-10-28 + + + + https://stellar.org/community/events/the-future-of-cryptocurrency-regulation + 2025-10-28 + + + + https://stellar.org/community/events/mainnet-2022 + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-q1-in-review + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-3-11-21 + 2025-10-28 + + + + https://stellar.org/blog/developers/new-releases-stellar-core-v12-0-0-horizon-v0-21-0 + 2025-10-28 + + + + https://stellar.org/community/events/luxembourg-blockchain-week-panel + 2025-10-28 + + + + https://stellar.org/blog/developers/liquidity-liquidity-liquidity + 2025-10-28 + + + + https://stellar.org/community/events/national-briefing-of-women-of-color-in-blockchain-womens-history-month + 2025-10-28 + + + + https://stellar.org/community/events/dc-blockchain-summit + 2025-10-28 + + + + https://stellar.org/community/events/paris-blockchain-week-summit-panel-the-future-of-payments + 2025-10-28 + + + + https://stellar.org/community/events/workshop-build-your-own-stellar-wallet + 2025-10-28 + + + + https://stellar.org/community/events/stellar-virtual-meetup-latam + 2025-10-28 + + + + https://stellar.org/community/events/jed-mcmaleb-speaks-at-the-rzrblock-virtual-hackathon-of-the-university-of-arkansas + 2025-10-28 + + + + https://stellar.org/community/events/lendit-fintech + 2025-10-28 + + + + https://stellar.org/blog/developers/why-doesnt-soroban-use-a-jit + 2025-10-28 + + + + https://stellar.org/community/events/decentralized-community-funding + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-18-upgrade-complete + 2025-10-28 + + + + https://stellar.org/community/events/virtual-equality-lounge-redefining-flexibility-in-the-new-normal + 2025-10-28 + + + + https://stellar.org/blog/developers/fixing-memo-less-payments + 2025-10-28 + + + + https://stellar.org/blog/developers/lightning-on-stellar-roadmap + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-quarter-in-review-q1-2022 + 2025-10-28 + + + + https://stellar.org/community/events/asian-financial-forum-state-of-blockchain-fireside-chat + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-q1-2023-in-review + 2025-10-28 + + + + 
https://stellar.org/community/events/stellar-development-foundation-quarter-in-review-q2-2022 + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-19-upgrade-guide + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-aprende-a-usar-la-api-de-stellar-horizon + 2025-10-28 + + + + https://stellar.org/community/events/podcast-episode-25 + 2025-10-28 + + + + https://stellar.org/community/events/stellar-course-in-portuguese + 2025-10-28 + + + + https://stellar.org/community/events/denelle-dixon-speaks-at-blockdown-2-0 + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-quarterly-review-q4-2020 + 2025-10-28 + + + + https://stellar.org/community/events/info-session-for-scf-11-startup-bootcamp + 2025-10-28 + + + + https://stellar.org/community/events/token2049-singapore + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-crowdfunding-con-stellar-part-ii + 2025-10-28 + + + + https://stellar.org/community/events/world-economic-forum + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-1-14-21 + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-13-upgrade-complete + 2025-10-28 + + + + https://stellar.org/community/events/gbbc-university-dialogues-on-blockchain-digital-assets + 2025-10-28 + + + + https://stellar.org/community/events/money-20-20-increasing-financial-access-services-with-tokenization + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-aprende-a-usar-la-red-de-stellar-desde-ceros + 2025-10-28 + + + + https://stellar.org/community/events/lendit-usa + 2025-10-28 + + + + https://stellar.org/community/events/blockshow-singapore-fintech-festival-panel-will-cbdcs-disrupt-stablecoins + 2025-10-28 + + + + https://stellar.org/blog/developers/building-sorobans-minimum-viable-ecosystem + 2025-10-28 + + + + https://stellar.org/community/events/build-on-stellar-pearl-hacks + 2025-10-28 + + + + https://stellar.org/blog/developers/what-happened-at-hack-a-soroban + 2025-10-28 + + + + https://stellar.org/community/events/creating-usable-stellar-applications + 2025-10-28 + + + + https://stellar.org/blog/developers/kelp-gui-your-first-automated-trading-bot + 2025-10-28 + + + + https://stellar.org/community/events/european-blockchain-convention-2 + 2025-10-28 + + + + https://stellar.org/community/events/how-wallets-and-apps-can-reach-cash-users-globally-with-moneygram + 2025-10-28 + + + + https://stellar.org/community/events/legislative-hearing-to-review-the-digital-commodities-consumer-protection-act + 2025-10-28 + + + + https://stellar.org/blog/developers/data-structure-bucketlistdb + 2025-10-28 + + + + https://stellar.org/community/events/episode-13 + 2025-10-28 + + + + https://stellar.org/community/events/digital-asset-and-cbdc-compliance-panel + 2025-10-28 + + + + https://stellar.org/community/events/stellar-nft-artist-challenge-sxsw + 2025-10-28 + + + + https://stellar.org/blog/developers/try-our-new-analytics-dataset + 2025-10-28 + + + + https://stellar.org/community/events/the-takeover + 2025-10-28 + + + + https://stellar.org/community/events/blockchain-finance-forum-keynote + 2025-10-28 + + + + https://stellar.org/community/events/techcrunch-disrupt + 2025-10-28 + + + + https://stellar.org/community/events/vivatech + 2025-10-28 + + + + https://stellar.org/community/events/meridian-2021 + 2025-10-28 + + + + https://stellar.org/community/events/stellar-w-blockchain-sydney-enterprise-focus + 2025-10-28 + + + + 
https://stellar.org/community/events/open-protocol-discussion-2-17-2022 + 2025-10-28 + + + + https://stellar.org/community/events/la-blockchain-summit-stellars-next-wave-amms-new-partnerships-and-more + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-h-e-r-dao-hacker-house + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-06-02-22 + 2025-10-28 + + + + https://stellar.org/community/events/podcast-episode-21 + 2025-10-28 + + + + https://stellar.org/community/events/ethdenver-2023 + 2025-10-28 + + + + https://stellar.org/community/events/techstars-startup-weekend-blockchain-latam-with-visa-and-stellar + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-5-5-2022 + 2025-10-28 + + + + https://stellar.org/community/events/oecd-symposium-on-digitalisation-and-finance-in-asia + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-q4-2021-review + 2025-10-28 + + + + https://stellar.org/community/events/scf-11-startup-bootcamp + 2025-10-28 + + + + https://stellar.org/blog/developers/making-international-remittances-easy-with-pay-by-phone + 2025-10-28 + + + + https://stellar.org/blog/developers/developer-spotlights-tommaso-de-ponti-tdep + 2025-10-28 + + + + https://stellar.org/community/events/podcast-episode-17 + 2025-10-28 + + + + https://stellar.org/community/events/nfts-on-stellar + 2025-10-28 + + + + https://stellar.org/community/events/data-analytics-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/developers/reducing-friction-on-the-stellar-network-with-claimable-balances + 2025-10-28 + + + + https://stellar.org/community/events/futurist-conference + 2025-10-28 + + + + https://stellar.org/community/events/dc-blockchain-summit-2 + 2025-10-28 + + + + https://stellar.org/blog/developers/introducing-horizon-1-0 + 2025-10-28 + + + + https://stellar.org/blog/developers/demystifying-proof-of-agreement + 2025-10-28 + + + + https://stellar.org/community/events/innovating-colombias-financial-system + 2025-10-28 + + + + https://stellar.org/blog/developers/whats-new-with-stellar-dev-docs + 2025-10-28 + + + + https://stellar.org/community/events/webinar-the-stellar-asset-sandbox-your-toolkit-for-exploring-tokenization + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-19-upgrade-complete + 2025-10-28 + + + + https://stellar.org/community/events/custodial-vs-non-custodial-apps-which-side-are-you-on + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-crea-tu-propio-anchor-con-stellar + 2025-10-28 + + + + https://stellar.org/community/events/paris-blockchain-week-summit + 2025-10-28 + + + + https://stellar.org/community/events/techstars-startup-weekend-blockchain-mexico-with-stellar + 2025-10-28 + + + + https://stellar.org/community/events/reimagina-las-finanzas-con-blockchain + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-2 + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-5-12-2022 + 2025-10-28 + + + + https://stellar.org/community/events/handsonweb3-hosted-by-paradigm + 2025-10-28 + + + + https://stellar.org/community/events/defi-summer-hackathon-by-minority-programmers-association + 2025-10-28 + + + + https://stellar.org/community/events/meridian-2022 + 2025-10-28 + + + + https://stellar.org/community/events/que-es-xlm-lumens-y-stellar + 2025-10-28 + + + + https://stellar.org/blog/developers/how-stellar-org-recovers-from-a-testnet-reset + 2025-10-28 + + + + 
https://stellar.org/blog/developers/muxed-accounts-faq + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-7-29-21 + 2025-10-28 + + + + https://stellar.org/community/events/from-bootcamp-to-the-stellar-ecosystem + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-operaciones-multifirma-con-stellar + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-q3-in-review + 2025-10-28 + + + + https://stellar.org/community/events/stellar-blockchain-meetup-austin-tx + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-10-28-21 + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-q4-2022-in-review + 2025-10-28 + + + + https://stellar.org/community/events/global-cbdc-challenge-webinar-can-cbdc-truly-enable-financial-inclusion + 2025-10-28 + + + + https://stellar.org/blog/developers/developers-on-stellar-christian-rogobete-soneso + 2025-10-28 + + + + https://stellar.org/community/events/canvas-the-art-of-investigation + 2025-10-28 + + + + https://stellar.org/blog/developers/verifying-the-starbridge-protocol-with-ivy + 2025-10-28 + + + + https://stellar.org/community/events/stellar-blockchain-bootcamp-with-dfs-lab + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-16-upgrade-guide + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-3-3-2022 + 2025-10-28 + + + + https://stellar.org/community/events/new-stellar-ecosystem-standard-for-send-receive-transactions + 2025-10-28 + + + + https://stellar.org/community/events/stellar-sep24-interactive-deposits-withdrawals + 2025-10-28 + + + + https://stellar.org/community/events/consensus-2023 + 2025-10-28 + + + + https://stellar.org/community/events/ucsd-webinar-cryptocurrency-and-central-bank-digital-currency + 2025-10-28 + + + + https://stellar.org/community/events/money20-20-private-breakfast + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-15-upgrade-complete + 2025-10-28 + + + + https://stellar.org/community/events/p2p-financial-systems-international-workshop-panel + 2025-10-28 + + + + https://stellar.org/community/events/zkvm-about-the-motocrab + 2025-10-28 + + + + https://stellar.org/community/events/compliance-and-the-stellar-network-understanding-the-sdf-elliptic-partnershp + 2025-10-28 + + + + https://stellar.org/community/events/fintech-abu-dhabi-can-decentralised-finance-and-compliance-co-exist + 2025-10-28 + + + + https://stellar.org/community/events/code-and-hacks-stellar-highlights-lo-mejor-del-2020 + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-5-6-21 + 2025-10-28 + + + + https://stellar.org/community/events/stanford-future-of-digital-currency-initiative-annual-meeting + 2025-10-28 + + + + https://stellar.org/blog/developers/guide-to-protocol-13-prep + 2025-10-28 + + + + https://stellar.org/community/events/hack-the-system + 2025-10-28 + + + + https://stellar.org/community/events/driving-inclusion-through-compliance + 2025-10-28 + + + + https://stellar.org/community/events/jed-mccaleb-speaks-at-blockdown-2-0 + 2025-10-28 + + + + https://stellar.org/community/events/podcast-episode-26 + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-5-19-2022 + 2025-10-28 + + + + https://stellar.org/community/events/revolutionizing-cross-border-payments-with-stablecoins + 2025-10-28 + + + + https://stellar.org/community/events/european-blockchain-convention + 2025-10-28 + + + + 
https://stellar.org/blog/developers/stellar-protocol-11-public-network-upgrade + 2025-10-28 + + + + https://stellar.org/community/events/stellar-development-foundation-quarter-in-review-q3-2022 + 2025-10-28 + + + + https://stellar.org/community/events/turing-complete-contract-proposal-for-stellar + 2025-10-28 + + + + https://stellar.org/blog/developers/rethinking-and-relaunching-the-demo-wallet + 2025-10-28 + + + + https://stellar.org/community/events/fireside-chat-with-jed-mccaleb + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-1-28-2021 + 2025-10-28 + + + + https://stellar.org/community/events/gbbc-blockchain-central-unga + 2025-10-28 + + + + https://stellar.org/community/events/hack-africa-by-encode-club + 2025-10-28 + + + + https://stellar.org/community/events/cowries-cross-border-payment-services-for-nigeria-powered-by-stellar + 2025-10-28 + + + + https://stellar.org/blog/developers/beta-test-our-new-dev-docs + 2025-10-28 + + + + https://stellar.org/community/events/the-future-of-money-innovative-solutions-for-our-transforming-global-economy + 2025-10-28 + + + + https://stellar.org/community/events/paris-fintech-forum + 2025-10-28 + + + + https://stellar.org/community/events/stellar-austria-developers-meetup + 2025-10-28 + + + + https://stellar.org/community/events/hack-the-northeast-beyond + 2025-10-28 + + + + https://stellar.org/community/events/converge + 2025-10-28 + + + + https://stellar.org/community/events/ecosystem-panel-plug-in-to-the-world-of-stellar-assets + 2025-10-28 + + + + https://stellar.org/blog/developers/check-out-our-new-docs + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-4-28-2022 + 2025-10-28 + + + + https://stellar.org/blog/developers/building-on-soroban-three-teams-journeys-smart-contracts + 2025-10-28 + + + + https://stellar.org/community/events/gbbcs-virtual-blockchain-central-davos-vision-vs-reality-what-inclusion-means + 2025-10-28 + + + + https://stellar.org/blog/developers/why-quorums-matter-and-how-stellar-approaches-them + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-9-9-21 + 2025-10-28 + + + + https://stellar.org/blog/developers/accelerating-analytics-on-stellar + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-2-3-2022 + 2025-10-28 + + + + https://stellar.org/community/events/paris-blockchain-week-summit-2 + 2025-10-28 + + + + https://stellar.org/community/events/sxsw-finance-3-0-summit + 2025-10-28 + + + + https://stellar.org/community/events/north-american-bitcoin-conference-panel-tomorrows-defi + 2025-10-28 + + + + https://stellar.org/community/events/meridian-2020 + 2025-10-28 + + + + https://stellar.org/community/events/code-hacks-casos-de-uso-en-stellar-saldo + 2025-10-28 + + + + https://stellar.org/blog/developers/kelp-why-we-built-it-the-liquidity-problem + 2025-10-28 + + + + https://stellar.org/community/events/sdf-end-of-the-year-catch-up-cloudflare + 2025-10-28 + + + + https://stellar.org/community/events/consensus-2021 + 2025-10-28 + + + + https://stellar.org/blog/developers/messing-around-with-multi-sig + 2025-10-28 + + + + https://stellar.org/community/events/world-economic-forum-2 + 2025-10-28 + + + + https://stellar.org/community/events/the-global-fintech-warming-democratizing-finance-with-blockchain + 2025-10-28 + + + + https://stellar.org/community/events/open-protocol-discussion-06-23-22 + 2025-10-28 + + + + https://stellar.org/community/events/payments-canada-summit-panel + 2025-10-28 + + + + 
https://stellar.org/community/events/stellar-development-foundation-q2-2021-review + 2025-10-28 + + + + https://stellar.org/blog/developers/building-on-soroban-analyzing-soroban-vs-ethereum-solana + 2025-10-28 + + + + https://stellar.org/community/events/stellar-community-fund-project-pitches-2 + 2025-10-28 + + + + https://stellar.org/community/events/ama-with-sdfs-new-developer-advocates + 2025-10-28 + + + + https://stellar.org/community/events/digital-currency-conference + 2025-10-28 + + + + https://stellar.org/blog/developers/how-to-embed-stellar-services-in-third-party-software-with-sep-24 + 2025-10-28 + + + + https://stellar.org/community/events/stellar-community-fund-project-pitches-3 + 2025-10-28 + + + + https://stellar.org/community/events/meridian-2023 + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-invests-5-million-in-abra + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-invests-3-million-in-tribal-credit + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-releases-first-quarterly-review + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-announces-meridian-2020-global-connections-to-solve-real-world-challenges + 2025-10-28 + + + + https://stellar.org/press/sdf-appoints-denelle-dixon-as-executive-director-and-ceo + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-announces-exclusive-partnership-with-elliptic + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-announces-enterprise-fund-investment-in-satoshipay + 2025-10-28 + + + + https://stellar.org/press/new-stellar-anchor-platform-bridges-businesses-to-the-blockchain-and-to-a-global-network-of-wallets-and-exchanges + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-ecurrency-and-anz-shortlisted-in-g20-techsprint-hosted-by-bis-and-bank-of-indonesia + 2025-10-28 + + + + https://stellar.org/press/stellar-blockchain-now-available-on-samsung-galaxy-smartphones + 2025-10-28 + + + + https://stellar.org/press/bitbond-and-bankhaus-von-der-heydt-issue-euro-stablecoin-eurb-on-the-stellar-network + 2025-10-28 + + + + https://stellar.org/press/sdf-announces-strategic-investment-in-dstoq + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-announces-two-new-members-to-board-of-directors + 2025-10-28 + + + + https://stellar.org/blog/developers/stellar-network-upgrade + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/ibm-klickex-partnership + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/build-challenge + 2025-10-28 + + + + https://stellar.org/blog/developers/tokens-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/global-partnerships + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-consensus-protocol-proof-code + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/roadmap + 2025-10-28 + + + + https://stellar.org/blog/developers/multisig-and-simple-contracts-stellar + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/sdf-statement-november-29-email + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/why-btc-giveaway + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/bitcoin-claim-lumens + 2025-10-28 + + + + https://stellar.org/blog/policy/3-common-misconceptions-about-ico-law + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/announcing-first-stellar-auction + 2025-10-28 + + + + 
https://stellar.org/blog/foundation-news/introducing-stellar + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/the-need-for-equality + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/understanding-initial-coin-offerings + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/introducing-stellar-partnership-grant-program + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/bitcoin-claim-lumens-2 + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/safety-liveness-and-fault-tolerance-consensus-choice + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/lightyear-announcement + 2025-10-28 + + + + https://stellar.org/blog/developers/removal-of-partial-payments + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/decentralized-to-the-core + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-outage-9-20-14 + 2025-10-28 + + + + https://stellar.org/blog/developers/upgraded-network-is-here + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/using-stellar-for-ico + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/usdc-stellar-faq + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/first-year + 2025-10-28 + + + + https://stellar.org/blog/developers/zkvm-a-new-design-for-fast-confidential-smart-contracts + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stellar-build-challenge-7-results + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/how-to-succeed-at-the-stellar-community-fund + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-11-improvements-stellar + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/bringing-lumens-to-millions + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/the-right-time-for-open-source + 2025-10-28 + + + + https://stellar.org/blog/policy/the-state-of-stablecoins + 2025-10-28 + + + + https://stellar.org/blog/policy/stellar-receives-sharia-compliance-certification-transfers-tokenization + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/equitable-access-requires-financial-literacy + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/take-our-ecosystem-survey + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/the-story-of-2020-lighting-up-the-stellar-map + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/sdfs-next-steps + 2025-10-28 + + + + https://stellar.org/blog/policy/blockchain-policy-getting-it-right + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/celebrating-womens-history-month + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/introducing-stellar-community-fund-2-0 + 2025-10-28 + + + + https://stellar.org/blog/policy/sdfs-comment-letter-on-fincen-nprm + 2025-10-28 + + + + https://stellar.org/blog/developers/announcing-the-7th-stellar-build-challenge + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/introducing-meridian + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-security-guide-protect-scammers + 2025-10-28 + + + + https://stellar.org/blog/developers/a-fresh-update-to-stellar-account-viewer + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/how-we-see-the-stable-act + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stellar-coinbase-earn + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/achieving-network-effects-on-an-open-network + 2025-10-28 + + + + https://stellar.org/blog/policy/compliance-the-stellar-network + 2025-10-28 + + + + https://stellar.org/blog/developers/the-importance-of-key-management-recovery + 
2025-10-28 + + + + https://stellar.org/blog/ecosystem/cowries-cross-border-payment-services-for-nigeria-powered-by-stellar + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/usdc-is-coming-to-stellar + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/keybase-stellar-lumens-spacedrop + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/why-im-joining-stellar + 2025-10-28 + + + + https://stellar.org/blog/policy/sdf-joins-the-blockchain-association + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/association-of-ukrainian-banks-admits-sdf-as-member + 2025-10-28 + + + + https://stellar.org/blog/developers/should-we-increase-the-minimum-fee + 2025-10-28 + + + + https://stellar.org/blog/developers/announcing-stellar-protocol-15 + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stellar-community-fund-seed-finalists-litemint-and-optionblox-yieldblox + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/announcing-the-new-stellar-org + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/why-ibm-built-world-wire-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stellar-community-seed-fund-no1-wrap-up + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/keybase-and-stellar-partnership + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/together-we-can-a-decentralized-vision-for-financial-inclusion + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/announcing-the-new-stellar-logo + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/women-building-in-blockchain-a-spotlight-on-sdfs-technical-talent + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stellar-community-seed-fund-finalists-mojoflower-and-blocknify + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stellar-anchor-basics-webinar-recap + 2025-10-28 + + + + https://stellar.org/blog/policy/sdfs-supplemental-comment-letter-on-fincens-nprm + 2025-10-28 + + + + https://stellar.org/blog/policy/regulating-virtual-assets-vasps + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/sdf-donation-matching-program + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/q1-2018-stellar-and-state-of-crypto + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/sdf-onstage-at-consensus-distributed + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/sdf-ecosystem-updates-lumen-distribution-programs + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-14-improvements + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/sixth-stellar-build-challenge-results + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/sdf-ecosystem-updates-developer-programs + 2025-10-28 + + + + https://stellar.org/blog/policy/drive-inclusion-through-compliance + 2025-10-28 + + + + https://stellar.org/blog/policy/sdf-at-imf-world-bank-annual-meetings + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stably-makes-financial-transactions-cheaper-faster-and-more-transparent-with-usdc + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-2019-strategy + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/sdf-at-consensus-distributed + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/2018-ecosystem-updates-community-vision-part-1 + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/where-we-are-headed + 2025-10-28 + + + + https://stellar.org/blog/developers/issuer-enforced-finality-explained + 2025-10-28 + + + + 
https://stellar.org/blog/ecosystem/how-wyre-enables-businesses-to-build-on-stellar-with-usdc + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/introducing-stellar-orgs-new-features + 2025-10-28 + + + + https://stellar.org/blog/developers/key-management-101 + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/registration-is-now-live-for-meridian-2020 + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/our-proposal-to-disable-inflation + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/on-worldwide-consensus + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/fiat-on-off-ramps-and-cross-border-payments-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/finclusive-helps-businesses-stay-compliant-and-launches-payments-in-usdc + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/2-years-in-building-resilience-as-we-grow + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-2018-strategy + 2025-10-28 + + + + https://stellar.org/blog/developers/sep-30-recoverysigner-user-friendly-key-management + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/looking-back-on-the-stellar-community-fund-with-leaf-global-fintech + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/supporting-ukrainian-financial-institutions-to-leverage-blockchain + 2025-10-28 + + + + https://stellar.org/blog/developers/diving-into-energy-use-on-stellar-blockchain-payment-efficiency-examined + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/latam-crypto-exchange-bitso-to-integrate-stellar-usdc + 2025-10-28 + + + + https://stellar.org/blog/developers/using-protocol-17s-asset-clawback + 2025-10-28 + + + + https://stellar.org/blog/developers/stablecoins-the-future-of-digital-money + 2025-10-28 + + + + https://stellar.org/blog/developers/custodial-vs-non-custodial-apps-which-side-are-you-on + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/expanding-financial-access-through-stellar-a-conversation-with-business-reporter + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/looking-back-on-the-stellar-community-fund-with-litemint + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/tala-visa-partner-on-solution-for-underbanked-supported-by-circle-and-stellar + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/usdcs-expansion-across-the-stellar-ecosystem + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/from-cross-border-payments-to-unified-wallets-african-startups-are-building-the-future-of-finance + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/arf-and-tempo-launch-new-stellar-based-payments-from-europe-to-the-philippines + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/leveraging-on-chain-assets-to-support-the-stellar-ecosystem + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-13-improvements + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stablecorp-and-versabank-issue-canadian-stablecoin-vcad-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/a-look-back-on-meridian-2021 + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/register-for-meridian-2021-build-locally-impact-globally + 2025-10-28 + + + + https://stellar.org/blog/developers/protocol-17-improvements + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/protecting-value-and-staying-compliant-with-asset-clawback + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/meridian-2020-recap + 2025-10-28 + + + + 
https://stellar.org/blog/foundation-news/announcing-the-matching-fund + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/moneygram-international-launches-a-new-pilot-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/developers/talk-that-tech-learn-from-sdfs-engineering-team + 2025-10-28 + + + + https://stellar.org/blog/developers/amms-in-the-stellar-ecosystem + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/results-are-out-the-dfs-blockchain-bootcamp + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/the-stellar-development-foundation-launches-initiative-to-accelerate-blockchain-education + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/tala-joins-the-stellar-ecosystem-to-build-out-digital-banking-capabilities + 2025-10-28 + + + + https://stellar.org/blog/developers/stellar-quest-series-4-is-now-live + 2025-10-28 + + + + https://stellar.org/blog/developers/building-speedex-a-novel-design-for-decentralized-exchanges + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/the-story-of-2021 + 2025-10-28 + + + + https://stellar.org/blog/developers/starlight-a-layer-2-payment-channel-protocol-for-stellar + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-2022-strategy + 2025-10-28 + + + + https://stellar.org/blog/developers/smart-contracts-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/policy/the-cbdc-guidebook-for-regulators-and-policymakers + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-enterprise-fund-invests-15-million-in-airtm + 2025-10-28 + + + + https://stellar.org/press/statement-on-halted-sdf-validators + 2025-10-28 + + + + https://stellar.org/press/leading-asian-crypto-exchange-liquid-enables-stellar-usdc + 2025-10-28 + + + + https://stellar.org/press/ukrainian-ministry-of-digital-transformation-to-develop-virtual-assets-and-to-facilitate-cbdc-infrastructure-with-the-stellar-development-foundation + 2025-10-28 + + + + https://stellar.org/press/centre-consortium-announces-stellar-as-an-official-chain-for-usdc + 2025-10-28 + + + + https://stellar.org/press/usdc-is-live-on-the-stellar-network + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-enterprise-fund-invests-in-nigeria-based-cowrie + 2025-10-28 + + + + https://stellar.org/press/stellar-development-foundation-invests-5-million-in-wyre + 2025-10-28 + + + + https://stellar.org/press/vottun-to-develop-payment-solution-in-spanish-regulatory-sandbox-with-support-from-pwc-stellar-development-foundation + 2025-10-28 + + + + https://stellar.org/press/wyre-announces-new-savings-api-powered-by-the-stellar-network + 2025-10-28 + + + + https://stellar.org/press/okcoin-becomes-first-us-exchange-to-integrate-stellar-usdc + 2025-10-28 + + + + https://stellar.org/press/automated-market-maker-functionality-is-live-on-stellar + 2025-10-28 + + + + https://stellar.org/press/ukraine-electronic-hryvnia-pilot-launched-by-tascombank-and-bitt-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/advancing-diversity-equity-and-inclusion-with-intention + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q4-2021-in-review + 2025-10-28 + + + + https://stellar.org/blog/policy/parliamentarians-the-world-over-converged-on-panama-city-here-is-what-we-told-them + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stellar-community-fund-3-0 + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q2-2022-in-review + 
2025-10-28 + + + + https://stellar.org/blog/policy/fincens-proposed-digital-asset-transaction-rule + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/leading-crypto-custodian-bitgo-now-supports-stellar-usdc + 2025-10-28 + + + + https://stellar.org/blog/policy/the-time-for-stablecoin-legislation-is-now + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q1-2022-in-review + 2025-10-28 + + + + https://stellar.org/blog/policy/the-responsible-financial-innovation-act-a-step-towards-clear-rules-of-the-road + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-2021-strategy + 2025-10-28 + + + + https://stellar.org/blog/policy/the-case-for-cbdcs-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/the-stellar-development-foundation-is-certified-as-a-great-place-to-work + 2025-10-28 + + + + https://stellar.org/blog/policy/sdf-on-capitol-hill + 2025-10-28 + + + + https://stellar.org/blog/policy/can-a-cbdc-promote-financial-inclusion + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/best-of-both-worlds-stellar-usdc + 2025-10-28 + + + + https://stellar.org/blog/developers/fast-forwarding-stellar-amms-liquidity-pools + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/scf-finalists-coinqvest-and-anclap + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/celebrating-one-year-of-usdc-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/developers/introducing-automated-market-makers-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stellar-community-seed-fund-finalists-leaf-global-fintech-and-task-io + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/break-into-blockchain-with-the-women-of-sdf-pt-2 + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/a-taste-of-the-future-finance-3-0-at-sxsw + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/break-into-blockchain-with-the-women-of-sdf-pt-1 + 2025-10-28 + + + + https://stellar.org/blog/policy/sdfs-statement-to-the-biden-administrations-first-ever-digital-asset-executive-order + 2025-10-28 + + + + https://stellar.org/blog/developers/smart-contracts-on-stellar-why-now + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-at-sxsw-2022 + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/newton-goes-multichain-with-stellar-usdc + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/announcing-the-newest-stellar-advocacy-program-stellar-campus-experts + 2025-10-28 + + + + https://stellar.org/blog/developers/announcing-protocol-19 + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/break-into-blockchain-with-the-women-of-sdf-pt-4 + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/building-stellar-infrastructure-in-africa + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/break-into-blockchain-with-the-women-of-sdf-pt-3 + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/entering-the-mainstream-cryptocurrency-adoption-in-2022 + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/calling-all-female-founders-apply-to-the-stellar-matching-fund + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/three-is-a-magic-number + 2025-10-28 + + + + https://stellar.org/blog/developers/launch-your-blockchain-education-with-stellar-quest-learn + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/exploring-real-world-problems-and-solutions-at-our-next-blockchain-bootcamp + 2025-10-28 + + + + 
https://stellar.org/blog/developers/project-jump-cannon-choosing-wasm + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/how-winning-the-stellar-community-fund-opened-new-doors-for-alfred-pay + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/enter-our-meridian-2022-design-contest + 2025-10-28 + + + + https://stellar.org/blog/developers/project-jump-cannon-soroban-preview-release + 2025-10-28 + + + + https://stellar.org/blog/developers/introducing-stellar-ramps-access-stellars-on-off-ramps-with-a-single-integration + 2025-10-28 + + + + https://stellar.org/blog/developers/freighter-wallet-evolves + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-announces-new-methodology-to-measure-environmental-impact-of-blockchain-technology-and-releases-findings-on-stellar-network-electricity-consumption + 2025-10-28 + + + + https://stellar.org/blog/policy/response-to-the-treasury-on-digital-assets + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/bitwage-helps-remote-workers-get-paid-with-usdc-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/offsetting-your-carbon-footprint-with-blockchain-technology + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/the-marketing-grants-programs-first-million + 2025-10-28 + + + + https://stellar.org/blog/developers/experiment-with-payment-channels-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/register-for-meridian-2022-the-urgency-of-doing + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/how-to-protect-yourself-from-scammers + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/ledger-live-integrates-usdc-on-stellar + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q2-in-review + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q2-in-review-2 + 2025-10-28 + + + + https://stellar.org/blog/policy/digital-assets-and-the-future-of-finance-testifying-before-the-us-house-committee-on-financial-services + 2025-10-28 + + + + https://stellar.org/blog/developers/soroban-a-new-smart-contract-standard + 2025-10-28 + + + + https://stellar.org/blog/policy/whats-in-a-name-why-consumers-need-stablecoin-standards + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/scf-winner-link-doubles-down-on-digital-financial-infrastructure + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/understanding-the-impacts-of-blockchain-technology + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q3-in-review-2 + 2025-10-28 + + + + https://stellar.org/blog/developers/the-dawn-of-sorobanathon + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q4-in-review + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q1-2021-in-review + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q4-2022-in-review + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q1-in-review + 2025-10-28 + + + + https://stellar.org/blog/foundation-news/stellar-development-foundation-q3-in-review + 2025-10-28 + + + + https://stellar.org/blog/developers/a-look-back-and-a-look-forward-for-2020 + 2025-10-28 + + + + https://stellar.org/blog/ecosystem/stellar-bridge-bounty-program-grants-up-to-5-million-to-finalists + 2025-10-28 + + + + 
+  <url><loc>https://stellar.org/blog/developers/an-ode-to-lumenauts-the-thanksgiving-special</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/stellar-development-foundation-q3-2022-in-review</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/meridian-2022-you-ask-we-answer</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/where-financial-access-and-financial-freedom-meet</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/policy/stellar-spotlighted-on-capitol-hill</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/one-giant-leap-for-stellar-quest-learn</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/how-scf-winner-beans-is-perfecting-payments</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/how-transparent-environmental-sustainability-is-good-for-business-and-blockchain</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/ciao-meridian-2022</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/meridian-2020-global-connection-to-solve-real-world-challenges</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/where-ideas-come-to-life-why-im-excited-about-meridian</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/starbridge-a-trust-minimized-bridge-between-stellar-and-other-blockchains</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/the-stellar-anchor-platform-connecting-to-stellar-made-simple</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/announcing-stellar-quest-live-series-5-soroban</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/how-scf-winner-vitreous-makes-fundraising-transparent-with-blockchain</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-development-foundation-and-moneygram-partner-with-techstars-to-spur-innovation-on-blockchain-based-cross-border-payments</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/mercado-bitcoin-integrates-usdc-on-stellar</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/soroban-smart-contracts-platform-native-to-the-stellar-network-is-live-on-futurenet</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-development-foundation-announces-meridian-2022-the-urgency-of-doing</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/tribal-credit-bitso-stellar-development-foundation-partner-to-enable-faster-cheaper-international-b2b-payments-in-latam</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/coinme-announces-usdc-powered-global-borderless-digital-cash-and-p2p-payments</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-development-foundation-launches-30-million-fund-to-power-startups-leveraging-blockchain</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/moneygram-announces-innovative-partnership-with-the-stellar-development-foundation-to-utilize-blockchain-technology</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/fintech-stax-gets-additional-backing-from-stellar-development-foundation-to-build-affordable-borderless-payment-solution-in-africa</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/gmo-z-com-trust-company-announces-gyen-and-zusd-stablecoins-to-launch-on-the-stellar-network</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-development-foundation-appoints-plaids-ginger-baker-to-board-of-directors</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/nium-and-stellar-development-foundation-partner-to-enable-payouts-in-190-countries</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/dfs-lab-and-stellar-development-foundation-launch-second-blockchain-bootcamp-for-african-startups</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/wirex-expands-usdc-support-to-the-stellar-network</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-development-foundation-invests-3-million-in-settle-network</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stably-launches-stablecoin-on-off-ramps-on-the-stellar-network</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/whitebit-integrates-usdc-on-stellar</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/novatti-to-launch-aud-stablecoin-leveraging-stellar-blockchain</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/2022-research-reveals-high-awareness-and-growing-cross-border-use-of-cryptocurrency-in-four-key-markets</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/mercado-bitcoin-partners-with-stellar-development-foundation-in-the-lift-challenge-real-digital-by-the-central-bank-of-brazil-and-fenasbac</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/moneygram-launches-pioneering-global-crypto-to-cash-service-on-the-stellar-network</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/expansion-of-usdc-on-stellar-continues-with-shift-markets-and-nexus-markets-integration</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/dfs-lab-and-stellar-development-foundation-launch-blockchain-bootcamp-for-african-startups</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-development-foundation-enterprise-fund-invests-10-million-in-netxd</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-development-foundation-enterprise-fund-invests-5-million-in-wave</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/flutterwave-enables-new-europe-africa-payment-corridors-via-stellar</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/bitt-and-stellar-development-foundation-selected-as-finalists-for-the-global-cbdc-challenge</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/novatti-announces-audd-stablecoin-to-go-live-on-november-1</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/a-synopsis-of-sorobanathon-first-light</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/how-to-use-a-wallet-employing-stellar-aid-assist</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/pioneering-the-future-of-aid-delivery</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/a-retrospective-on-stellar-quest-live-series-5</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/the-story-of-2022</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/stellar-community-fund-4-0-bigger-better-faster-soroban</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/a-developers-guide-to-soroban-adoption-fund-programs</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/sdf-x-dfs-lab-blockchain-startup-camps-are-here</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/stellar-quest-live-series-6-fast-cheap-out-of-control</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/bringing-stellar-aid-assist-to-a-global-forum</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/app-for-issuing-tokens-assets-on-stellar-testnet</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/stellar-development-foundation-2023-strategy</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/policy/representing-blockchain-on-the-commodity-futures-trading-commissions-global-market-advisory-committee</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/introducing-sorobanathon-equinox</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/policy/sdfs-policy-priorities-for-2023</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/stellars-vision-for-an-interoperable-future</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/stellar-founder-stories-tori-samples-co-founder-cto-leaf-global-fintech</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/experience-benefits-usdc-on-stellar-coinme</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/soroban-stellar-love</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/founder-journeys-ruben-galindo-steckel-co-founder-ceo-airtm</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/policy/stellar-development-foundation-digital-assets-research-development-rfi-white-house-office-science-technology-policy</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/blockchain-remittances-a-game-changer-for-cross-border-transfers</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/founder-journeys-thomaz-texeira-co-founder-ceo-ntokens</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/stellar-development-foundation-q1-2023-in-review</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/paris-blockchain-week-recap-stellar-advances-real-world-use-cases-for-blockchain</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/tech-talks-with-tomer-smart-contracts-and-interoperability-with-axelar-co-founder-sergey-gorbunov</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/stellar-community-fund-recap-financial-innovation-powered-soroban</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/when-blockchain-meets-humanitarian-aid</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/policy/engaging-uk-policymakers-and-regulators-parliament</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/stellar-community-fund-soroban-submission-best-practices</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/a-consensus-2023-recap-real-world-meets-blockchain</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/soroban-the-high-performance-smart-contract-platform-built-for-developers</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/anchor-directory-guide-finding-interoperable-asset-issuers-on-off-ramps-stellar</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/building-developer-tools-in-a-cross-chain-world-insights-from-the-cubist-co-founders</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/empowering-global-defi-innovators-recap-scf-startup-camp</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/policy/global-consensus-stablecoins-within-reach</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/block-by-block-with-denelle-dixon-what-financial-markets-crypto-investing-and-horses-have-in-common-ft-joey-krug</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/coinbase-integrates-with-usdc-on-stellar</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-development-foundation-appoints-asiff-hirji-of-moonpay-to-board-of-directors</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/arf-solving-liquidity-constraints-in-cross-border-payments-with-support-from-stellar-development-foundation</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-blockchain-technology-powers-international-rescue-committees-cash-based-humanitarian-assistance-for-conflict-affected-people-in-ukraine</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/unhcr-launches-pilot-cash-based-intervention-using-blockchain-technology-for-humanitarian-payments-to-people-displaced-and-impacted-by-the-war-in-ukraine</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/coinme-integrates-usdc-on-stellar-enabling-borderless-digital-cash-and-p2p-payments</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/tascombank-hryvnia-nominated-electronic-money-pilot-report-recommends-adoption-of-blockchain-to-transform-the-payment-landscape-of-ukraine</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-community-fund-kicks-off-10m-open-application-funding-program-for-soroban-projects</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/merkle-science-joins-forces-with-stellar-development-foundation-to-enhance-web3-risk-mitigation-and-compliance-capabilities</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/cebuana-lhullier-integrates-with-stellar-for-fast-cross-border-payments-to-the-philippines</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/franklin-templeton-announces-the-franklin-onchain-u-s-government-money-fund-surpasses-270-million-in-assets-under-management</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/stellar-development-foundation-announces-meridian-2023-unlocking-human-potential</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/and-then-it-was-four-ceo-anniversary</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/soroban-security-bug-bounty</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/blockchain-tool-aid-disbursements</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/proof-of-talk</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/sorobans-technical-design-decisions-learnings-from-ethereum</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/founder-journeys-mary-saracco-founder-of-latamex</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/inclusive-fintech-forum</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/the-poll-that-made-us-spiral-on-and-off-ramps</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/scf-project-pitches-4-june-23-batch</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/how-wallets-can-integrate-with-global-on-and-off-ramps-on-stellar</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/meridian-2023-unlocking-human-potential</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/block-by-block-with-denelle-dixon-why-the-regulatory-fight-for-crypto-in-the-us-is-worth-it-ft-carole-house-of-terranet-ventures</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/policy/mica-implementation-off-to-the-races</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/policy/european-union-eu-data-act-smart-contracts-at-risk</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/proposed-changes-to-transaction-submission</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/token2049</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/building-smart-contracts-faster-talking-efficient-development-w-aha-labs-co-founder-chad-o</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/ecosystem/stellar-community-fund-recap-soroban-infrastructure</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/scf-project-pitches-5-july-23-batch</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/stellar-development-foundation-q2-2023-in-review</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/not-all-data-is-equal-how-soroban-is-solving-state-bloat-with-state-expiration</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/press/allbridge-launch-connects-stellar-network-to-ethereum-solana-and-polygon</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/moneygram-ramps-marketing-awards-program-for-wallets-and-fintechs-webinar</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/aidex-2023</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/beyond-the-blockchain-unlocking-the-power-of-analytics-with-hubble</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/bridging-the-financial-services-gap-for-the-unbanked-how-stellar-ramps-make-finance-more-inclusive</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/updates-and-reminders-from-your-sdf-security-team</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/soroban-rpciege-recap</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/stellar-development-foundation-q2-2023-in-review</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/building-sorobans-minimum-viable-ecosystem-pt-2</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/essential-guide-rust-development</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/mainnet-2023</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/federal-reserve-bank-of-philadelphia-fintech-conference</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/bigger-is-better-why-stellar-is-the-leader-in-cash-to-crypto-on-and-off-ramps</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/tech-talks-with-tomer-meta-anchor-arf-impact-on-cross-border-finance-kazim-ozyilmaz</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/community/events/scf-project-pitches-6-august-batch</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/developers-guide-soroban-adoption-fund-programs-pt-2</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/meridian-2023-design-contest</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/learn-soroban-as-easy-as-1-2-3-with-community-made-tooling</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/sdfs-investment-in-moneygram-international</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/developers/sorobans-fee-structure-contributes-stellar-network-scalability</loc><lastmod>2025-10-28</lastmod></url>
+  <url><loc>https://stellar.org/blog/foundation-news/bringing-more-blockchain-to-the-bytecode-alliance</loc><lastmod>2025-10-28</lastmod></url>
+</urlset>
\ No newline at end of file
diff --git a/src/components/AudioPlayer.tsx b/src/components/AudioPlayer.tsx
new file mode 100644
index 0000000000..8e9054f0cc
--- /dev/null
+++ b/src/components/AudioPlayer.tsx
@@ -0,0 +1,39 @@
+import React from "react";
+
+type AudioBadgeProps = {
+  src: string;
+};
+
+export default function AudioBadge({ src }: AudioBadgeProps) {
+  return (
+    <div>
+      {/* Badge label shown next to the embedded player */}
+      <span>🎧 Audio Recording</span>
+      {/* Native HTML5 audio element; `src` is the URL of the recording */}
+      <audio controls src={src} />
+    </div>
+  );
+}
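For reference, a minimal sketch of how a badge component like this might be used from an MDX page in this Docusaurus site; the `@site` import path and the recording URL below are illustrative assumptions, not part of this diff:

import AudioBadge from "@site/src/components/AudioPlayer";

{/* Renders the 🎧 Audio Recording label with a native audio player */}
<AudioBadge src="/audio/example-meeting-recording.mp3" />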
diff --git a/src/components/DriveVideo.tsx b/src/components/DriveVideo.tsx
new file mode 100644
index 0000000000..db8e54d8d7
--- /dev/null
+++ b/src/components/DriveVideo.tsx
@@ -0,0 +1,39 @@
+import React from "react";
+
+type Props = {
+  ID: string;
+};
+
+export default function DrivePreviewCard({ ID }: Props) {
+  // Google Drive serves an embeddable player for a file at its /preview URL
+  const src = `https://drive.google.com/file/d/${ID}/preview`;
+
+  return (
+    <iframe src={src}